This is based on the calsyslab project.

__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from typing import List
from Forums.Utilities.utilities import *
from datetime import datetime, date, timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup, ResultSet, Tag


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def HiddenAnswers_description_parser(soup: BeautifulSoup):
    topic: str = "-1"             # topic name
    user: List[str] = []          # all users of each post
    addDate: List[datetime] = []  # all dates of each post
    feedback: List[str] = []      # all feedback of each vendor (found in just one forum, in a number format)
    status: List[str] = []        # each user's authority in each post, such as (adm, member, dangerous)
    reputation: List[str] = []    # each user's karma in each post (usually found as a number)
    sign: List[str] = []          # each user's signature in each post (usually a standard message after the content of the post)
    post: List[str] = []          # all messages of each post
    interest: List[str] = []      # each user's interest in each post

    # Finding the topic (should be just one coming from the Listing Page)
    li = soup.find("h1").find("span", {"itemprop": "name"})
    topic = li.text

    question: Tag = soup.find("div", {"class": "qa-part-q-view"})

    question_user = question.find("span", {"class": "qa-q-view-who-data"}).text
    user.append(cleanString(question_user.strip()))

    question_time = question.find("span", {"class": "qa-q-view-when-data"}).find("time").get("datetime")
    datetime_string = question_time.split("+")[0]
    datetime_obj = datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S")
    addDate.append(datetime_obj)

    question_user_status = question.find("span", {"class": "qa-q-view-who-title"}).text
    status.append(cleanString(question_user_status.strip()))

    question_user_karma = question.find("span", {"class": "qa-q-view-who-points-data"}).text
    # Convert karma to a pure numerical string (e.g. "1.2k" becomes "1200.0")
    if question_user_karma.find("k") > -1:
        question_user_karma = str(float(question_user_karma.replace("k", "")) * 1000)
    reputation.append(cleanString(question_user_karma.strip()))

    question_content = question.find("div", {"class": "qa-q-view-content qa-post-content"}).text
    post.append(cleanString(question_content.strip()))

    # Fields not present on this forum are filled with the "-1" placeholder
    feedback.append("-1")
    sign.append("-1")
    interest.append("-1")

    answer_list: ResultSet[Tag] = soup.find("div", {"class": "qa-a-list"}).find_all("div", {"class": "qa-a-list-item"})

    for replies in answer_list:
        user_name = replies.find("span", {"class": "qa-a-item-who-data"}).text
        user.append(cleanString(user_name.strip()))

        date_added = replies.find("span", {"class": "qa-a-item-when"}).find("time", {"itemprop": "dateCreated"}).get("datetime")
        date_string = date_added.split("+")[0]
        datetime_obj = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S")
        addDate.append(datetime_obj)

        post_data = replies.find("div", {"class": "qa-a-item-content qa-post-content"}).find("div", {"itemprop": "text"}).text
        post.append(cleanString(post_data.strip()))

        user_status = replies.find("span", {"class": "qa-a-item-who-title"}).text
        status.append(cleanString(user_status.strip()))

        karma = replies.find("span", {"class": "qa-a-item-who-points-data"}).text
        # Convert karma to a pure numerical string
        if karma.find("k") > -1:
            karma = str(float(karma.replace("k", "")) * 1000)
        reputation.append(cleanString(karma.strip()))

        feedback.append("-1")
        sign.append("-1")
        interest.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row
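
# Minimal usage sketch (not part of the original pipeline): run the description
# parser over one locally saved HTML page. The file name below is hypothetical,
# and cleanString() is assumed to come from the wildcard utilities import above.
#
#   with open("hiddenanswers_description.html", "r", encoding="utf-8") as handle:
#       page_soup = BeautifulSoup(handle.read(), "html.parser")
#   topic, users, statuses, reputations, interests, signs, posts, feedbacks, dates = \
#       HiddenAnswers_description_parser(page_soup)
#   print(topic, users[0], dates[0])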

def HiddenAnswers_listing_parser(soup: BeautifulSoup):
    board = "-1"                  # board name (the level above the topic in the forum categorization tree;
                                  # for instance, in Security/Malware/Tools to hack Facebook the board is Malware)
    forum: str = "HiddenAnswers"
    nm: int = 0                   # this variable should receive the number of topics
    topic: List[str] = []         # all topics
    user: List[str] = []          # all users of each topic
    post: List[int] = []          # number of posts of each topic
    view: List[int] = []          # number of views of each topic
    addDate: List[str] = []       # when the topic was created (difficult to find)
    href: List[str] = []          # this variable should receive all cleaned urls (we will use this to do the merge between
                                  # Listing and Description pages)

    # Finding the board
    literature = soup.find("div", {"class": "qa-main-heading"}).find("h1")
    board = literature.text

    queries_by_user: ResultSet[Tag] = soup.find("div", {"class": "qa-q-list"}).find_all("div", {"class": "qa-q-list-item"})

    for queries in queries_by_user:
        topic_of_query = queries.find("div", {"class": "qa-q-item-title"}).find("a").text
        topic.append(cleanString(topic_of_query.strip()))

        author = queries.find("span", {"class": "qa-q-item-who-data"}).find("a").text
        user.append(cleanString(author.strip()))

        num_answers = queries.find("span", {"class": "qa-a-count-data"}).text
        post.append(cleanString(num_answers.strip()))

        view.append("-1")

        date_posted = queries.find("span", {"class": "qa-q-item-when-data"}).text
        if date_posted.find("day") > 0:
            datetime_obj = datetime.now() - timedelta(days=1)
        else:
            datetime_obj = datetime.strptime(f"{date_posted} {date.today().year}", "%b %d %Y")
        addDate.append(datetime_obj)

        # this link will be cleaned
        listing_href = queries.find("div", {"class": "qa-q-item-title"}).find("a").get("href")
        href.append(listing_href)

    # need to change this method
    nm = len(topic)
    return organizeTopics(forum, nm, board, user, topic, view, post, href, addDate)
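
# Date-handling sketch (illustration only): the listing shows either a relative
# stamp such as "1 day ago" or an absolute "Mon DD" stamp with no year, so the
# loop above either backdates from now() or appends the current year before
# calling strptime, e.g.:
#
#   datetime.now() - timedelta(days=1)            # relative "day" branch
#   datetime.strptime("Apr 5 2024", "%b %d %Y")   # absolute-date branch, year appended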

# need to change this method
def hiddenanswers_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []
    # print(soup.find('table', {"class": "tborder clear"}).find(
    #     'tbody').find_all('tr', {"class": "inline_row"}))
    listing = soup.find_all('div', {"class": "qa-q-item-title"})

    for a in listing:
        link = a.find('a').get('href')
        href.append(link)

    return href
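
# Crawler hand-off sketch (hypothetical wiring, not from the original project): the
# hrefs returned above are the links the crawler is expected to visit next; relative
# links would still need to be joined against the forum's base URL, e.g.:
#
#   from urllib.parse import urljoin
#   base_url = "http://hiddenanswers.example.onion/"   # placeholder, not the real address
#   with open("hiddenanswers_listing.html", "r", encoding="utf-8") as handle:
#       listing_soup = BeautifulSoup(handle.read(), "html.parser")
#   links = [urljoin(base_url, h) for h in hiddenanswers_links_parser(listing_soup)]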