This is based on the calsyslab project.


__author__ = 'DarkWeb'

# Auxiliary functions to clean or convert the scraped data
from Forums.Utilities.utilities import *
from datetime import datetime
import re

# BeautifulSoup is used to search through the HTML tree
from bs4 import BeautifulSoup, ResultSet, Tag

# This is the method to parse the Description Pages (one page per topic in the Listing Pages)
def cardingleaks_description_parser(soup: Tag):

    # Fields to be parsed
    topic = "-1"    # 0 *topic name
    user = []       # 1 *all users of each post
    status = []     # 2 each user's authority in each post, such as (adm, member, dangerous)
    reputation = [] # 3 each user's karma in each post (usually found as a number)
    interest = []   # 4 each user's interests in each post
    sign = []       # 5 each user's signature in each post (usually a standard message after the content of the post)
    post = []       # 6 all messages of each post
    feedback = []   # 7 all feedback of each vendor (found in only one forum, in a number format)
    addDate = []    # 8 all dates of each post

    li = soup.find("h1", {"class": "p-title-value"})
    topic = cleanString(li.text.strip())

    post_list: ResultSet[Tag] = soup.find("div", {"class": "block-body js-replyNewMessageContainer"}).find_all("article", {"data-author": True})

    for ipost in post_list:
        username = ipost.get('data-author')
        user.append(username)

        user_status = ipost.find("h5", {"class": "userTitle message-userTitle"}).text
        status.append(cleanString(user_status.strip()))

        # Scan the user-statistics pairs for the "Points" entry, which holds the reputation score
        user_statistics: ResultSet[Tag] = ipost.find("div", {"class": "message-userExtras"}).find_all("dl", {"class": "pairs pairs--justified"})

        user_reputation = "-1"
        for stat in user_statistics:
            data_type = stat.find("span").get("data-original-title")
            if data_type == "Points":
                user_reputation = stat.find("dd").text
                break

        reputation.append(cleanString(user_reputation.strip()))

        # These fields are not present on this forum, so placeholder values are recorded
        interest.append("-1")
        sign.append("-1")

        user_post = ipost.find("div", {"class": "message-content js-messageContent"}).text
        post.append(cleanString(user_post.strip()))

        feedback.append("-1")

        datetime_text = ipost.find("ul", {"class": "message-attribution-main listInline"}).find("time").get("datetime")
        datetime_obj = datetime.strptime(datetime_text, "%Y-%m-%dT%H:%M:%S%z")
        addDate.append(datetime_obj)

    # Populate the final variable (this should be a list with all the fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row
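
# A minimal usage sketch (not part of the original file), assuming description
# pages have been saved locally as HTML. The file-path argument, the helper name
# _demo_description_parse, and the "html.parser" backend are illustrative
# placeholders, not the project's actual crawling pipeline.
def _demo_description_parse(html_path: str):
    with open(html_path, encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    # Returns (topic, user, status, reputation, interest, sign, post, feedback, addDate)
    return cardingleaks_description_parser(soup)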

# This is the method to parse the Listing Pages (one page with many topics)
def cardingleaks_listing_parser(soup: Tag):

    nm = 0                  # *this variable should receive the number of topics
    forum = "Cardingleaks"  # 0 *forum name
    board = "-1"            # 1 *board name (the level above the topic in the forum categorization tree.
                            # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []             # 2 *all authors of each topic
    topic = []              # 3 *all topics
    views = []              # 4 number of views of each topic
    posts = []              # 5 number of posts of each topic
    href = []               # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                            # Listing and Description pages)
    addDate = []            # 7 when the topic was created (difficult to find)

    # Finding the board (should be just one)
    li = soup.find("h1", {"class": "p-title-value"})
    board = cleanString(li.text.strip())

    thread_list: ResultSet[Tag] = soup.find("div", {"class": "structItemContainer-group js-threadList"}).find_all("div", {"data-author": True})

    nm = len(thread_list)

    for thread in thread_list:
        thread_author = thread.get("data-author")
        author.append(thread_author)

        thread_topic = thread.find("div", {"class": "structItem-title"}).text
        topic.append(cleanString(thread_topic.strip()))

        # Convert an abbreviated view count (e.g., "8.8K") to a numerical string (e.g., "8800")
        thread_view = thread.find("dl", {"class": "pairs pairs--justified structItem-minor"}).find("dd").text
        if "K" in thread_view:
            thread_view = str(int(float(thread_view.replace("K", "")) * 1000))
        views.append(thread_view)

        thread_posts = thread.find("dl", {"class": "pairs pairs--justified"}).find("dd").text
        posts.append(cleanString(thread_posts.strip()))

        thread_href = thread.find("div", {"class": "structItem-title"}).find("a").get("href")
        href.append(thread_href)

        thread_date = thread.find("li", {"class": "structItem-startDate"}).find("time").get("datetime")
        datetime_obj = datetime.strptime(thread_date, "%Y-%m-%dT%H:%M:%S%z")
        addDate.append(datetime_obj)

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate)
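
# Hedged sketch (not in the original file): the same "K"-suffix conversion used
# in the loop above, pulled out as a pure helper so its rounding behavior can be
# checked in isolation. The name _normalize_view_count is hypothetical.
def _normalize_view_count(text: str) -> str:
    # "8.8K" -> "8800"; values without a "K" suffix pass through unchanged
    if "K" in text:
        return str(int(float(text.replace("K", "")) * 1000))
    return text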

def cardingleaks_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []
    listing = soup.find_all('div', {"class": "structItem-title"})

    for a in listing:
        link = a.find('a').get('href')
        href.append(link)

    return href
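
# Hedged sketch (not in the original file): hrefs scraped from anchor tags are
# often relative paths, so a crawler would typically resolve them against the
# site root before visiting. The helper name and the base URL below are
# hypothetical placeholders, not the forum's real address.
def _demo_absolute_links(soup: Tag, base_url: str = "http://example.onion/"):
    from urllib.parse import urljoin
    return [urljoin(base_url, link) for link in cardingleaks_links_parser(soup)]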