This is based on the CALSysLab project.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

176 lines
7.3 KiB

  1. __author__ = 'Helium'
  2. # Here, we are importing the auxiliary functions to clean or convert data
  3. from typing import List
  4. from Forums.Utilities.utilities import *
  5. from datetime import date
  6. from datetime import timedelta
  7. import re
  8. # Here, we are importing BeautifulSoup to search through the HTML tree
  9. from bs4 import BeautifulSoup, ResultSet, Tag
  10. # This is the method to parse the Description Pages (one page to each topic in the Listing Pages)
  11. def HiddenAnswers_description_parser(soup: BeautifulSoup):
  12. topic: str = "-1" # topic name
  13. user: List[str] = [] # all users of each post
  14. addDate: List[datetime] = [] # all dated of each post
  15. feedback: List[str] = [] # all feedbacks of each vendor (this was found in just one Forum and with a number format)
  16. status: List[str] = [] # all user's authority in each post such as (adm, member, dangerous)
  17. reputation: List[str] = [] # all user's karma in each post (usually found as a number)
  18. sign: List[str] = [] # all user's signature in each post (usually a standard message after the content of the post)
  19. post: List[str] = [] # all messages of each post
  20. interest: List[str] = [] # all user's interest in each post
  21. # Finding the topic (should be just one coming from the Listing Page)
  22. li = soup.find("h1").find("span", {"itemprop": "name"})
  23. topic = li.text
  24. question: Tag = soup.find("div", {"class": "qa-part-q-view"})
  25. question_user = question.find("span", {"class": "qa-q-view-who-data"}).text
  26. user.append(cleanString(question_user.strip()))
  27. question_time = question.find("span", {"class": "qa-q-view-when-data"}).find("time").get("datetime")
  28. datetime_string = question_time.split("+")[0]
  29. datetime_obj = datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S")
  30. addDate.append(datetime_obj)
  31. try:
  32. question_user_status = question.find("span", {"class": "qa-q-view-who-title"}).text
  33. status.append(cleanString(question_user_status.strip()))
  34. except AttributeError:
  35. status.append("-1")
  36. try:
  37. question_user_karma = question.find("span", {"class": "qa-q-view-who-points-data"}).text
  38. # Convert karma to pure numerical string
  39. if question_user_karma.find("k") > -1:
  40. question_user_karma = str(float(question_user_karma.replace("k", "")) * 1000)
  41. reputation.append(cleanString(question_user_karma.strip()))
  42. except AttributeError:
  43. reputation.append("-1")
  44. question_content = question.find("div", {"class": "qa-q-view-content qa-post-content"}).text
  45. post.append(cleanString(question_content.strip()))
  46. feedback.append("-1")
  47. sign.append("-1")
  48. interest.append("-1")
  49. answer_list: ResultSet[Tag] = soup.find("div", {"class": "qa-a-list"}).find_all("div", {"class": "qa-a-list-item"})
  50. for replies in answer_list:
  51. user_name = replies.find("span", {"class", "qa-a-item-who-data"}).text
  52. user.append(cleanString(user_name.strip()))
  53. date_added = replies.find("span", {"class": "qa-a-item-when"}).find("time", {"itemprop": "dateCreated"}).get('datetime')
  54. date_string = date_added.split("+")[0]
  55. datetime_obj = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S")
  56. addDate.append(datetime_obj)
  57. post_data = replies.find("div", {"class": "qa-a-item-content qa-post-content"}).find("div",{"itemprop":"text"}).text
  58. post.append(cleanString(post_data.strip()))
  59. try:
  60. user_reputations = replies.find("span", {"class", "qa-a-item-who-title"}).text
  61. status.append(cleanString(user_reputations.strip()))
  62. except AttributeError:
  63. status.append("-1")
  64. try:
  65. karma = replies.find("span", {"class": "qa-a-item-who-points-data"}).text
  66. # Convert karma to pure numerical string
  67. if karma.find("k") > -1:
  68. karma = str(float(karma.replace("k", "")) * 1000)
  69. reputation.append(cleanString(karma.strip()))
  70. except AttributeError:
  71. reputation.append("-1")
  72. feedback.append("-1")
  73. sign.append("-1")
  74. interest.append("-1")
  75. # Populate the final variable (this should be a list with all fields scraped)
  76. row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)
  77. # Sending the results
  78. return row
  79. def HiddenAnswers_listing_parser(soup: BeautifulSoup):
  80. board = "-1" # board name (the previous level of the topic in the Forum categorization tree.
  81. # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
  82. forum: str = "HiddenAnswers"
  83. nm: int = 0 # this variable should receive the number of topics
  84. topic: List[str] = [] # all topics
  85. user: List[str] = [] # all users of each topic
  86. post: List[int] = [] # number of posts of each topic
  87. view: List[int] = [] # number of views of each topic
  88. addDate: List[str] = [] # when the topic was created (difficult to find)
  89. href: List[str] = [] # this variable should receive all cleaned urls (we will use this to do the merge between
  90. # Listing and Description pages)
  91. # Finding the board
  92. literature = soup.find("div", {"class": "qa-main-heading"}).find("h1")
  93. board = literature.text
  94. queries_by_user: ResultSet[Tag] = soup.find("div", {"class": "qa-q-list"}).find_all("div", {"class": "qa-q-list-item"})
  95. for queries in queries_by_user:
  96. topic_of_query = queries.find("div", {"class": "qa-q-item-title"}).find("a").text
  97. topic.append(cleanString(topic_of_query.strip()))
  98. author = queries.find("span", {"class": "qa-q-item-who-data"}).text
  99. user.append(cleanString(author.strip()))
  100. num_answers = queries.find("span", {"class": "qa-a-count-data"}).text
  101. post.append(cleanString(num_answers.strip()))
  102. view.append("-1")
  103. date_posted = queries.find("span", {"class": "qa-q-item-when-data"}).text
  104. if date_posted.find("minute") > 0:
  105. minutes_ago = date_posted.split(' ')[0]
  106. datetime_obj = datetime.now() - timedelta(minutes=int(minutes_ago))
  107. elif date_posted.find("day") > 0:
  108. days_ago = date_posted.split(' ')[0]
  109. datetime_obj = datetime.now() - timedelta(days=int(days_ago))
  110. elif bool(re.search(r"\d{4}", date_posted)):
  111. datetime_obj = datetime.strptime(date_posted, "%b %d, %Y")
  112. else:
  113. datetime_obj = datetime.strptime(f"{date_posted}, {date.today().year}", "%b %d, %Y")
  114. addDate.append(datetime_obj)
  115. #this link will be cleaned
  116. listing_href = queries.find("div", {"class": "qa-q-item-title"}).find("a").get("href")
  117. href.append(listing_href)
  118. #need to change this method
  119. nm = len(topic)
  120. return organizeTopics(forum, nm, board, user, topic, view, post, href, addDate)
  121. #need to change this method
  122. def hiddenanswers_links_parser(soup):
  123. # Returning all links that should be visited by the Crawler
  124. href = []
  125. #print(soup.find('table', {"class": "tborder clear"}).find(
  126. # 'tbody').find_all('tr', {"class": "inline_row"}))
  127. listing = soup.find_all('div', {"class": "qa-q-item-title"})
  128. for a in listing:
  129. link = a.find('a').get('href')
  130. href.append(link)
  131. return href