This is based on the calsyslab project.

__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
# datetime.strptime is called below, so the datetime class itself must be imported
from datetime import datetime, date, timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def abyssForums_description_parser(soup):

    # Fields to be parsed
    topic = "-1"        # 0 topic name
    user = []           # 1 all users of each post
    addDate = []        # 2 all dates of each post
    feedback = []       # 3 all feedback of each vendor (this was found in just one forum, and in a number format)
    status = []         # 4 all users' authority in each post, such as (adm, member, dangerous)
    reputation = []     # 5 all users' karma in each post (usually found as a number)
    sign = []           # 6 all users' signatures in each post (usually a standard message after the content of the post)
    post = []           # 7 all messages of each post
    interest = []       # 8 all users' interests in each post
    image_user = []     # 9 all user avatars of each post
    image_post = []     # 10 all first images of each post
    # Finding the topic (should be just one coming from the Listing Page)
    li = soup.find("div", {"class": "page-body"}).find("h2", {"class": "topic-title"})
    topic = li.text.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())

    regex = re.compile('post has-profile.*')
    posts = soup.find_all('div', {"class": regex})
    # print(len(posts))

    # For each message (post), get all the fields we are interested in:
    for ipost in posts:
        # Finding the author (user) of the post
        author = ipost.find('a', {"class": "username"}).text
        user.append(cleanString(author))  # Remember to clean the problematic characters

        # These fields are not available on this forum, so they receive the default "-1"
        status.append("-1")
        reputation.append("-1")
        interest.append("-1")
        sign.append("-1")
        feedback.append("-1")
        image_post.append("-1")

        # Finding the user avatar (kept as the base64 payload of the img src, when present)
        img = ipost.find('dl', {"class": "postprofile"}).find('img')
        if img is not None:
            img = img.get('src').split('base64,')[-1]
        else:
            img = "-1"
        image_user.append(img)
        # Finding the date of the post (date_str/time_str avoid shadowing the imported date class)
        date_time_obj = ipost.find('time').attrs
        date_str = date_time_obj['datetime'][0:10]
        time_str = date_time_obj['datetime'][11:19]
        date_time_obj = datetime.strptime(date_str + " " + time_str, '%Y-%m-%d %H:%M:%S')
        addDate.append(date_time_obj)

        # Finding the post
        inner = ipost.find('div', {"class": "content"})
        inner = inner.text.strip()
        post.append(cleanString(inner))
    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results
    return row

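# A minimal usage sketch (an addition for illustration, not part of the original
# parser): this is how a saved Description page could be handed to the parser.
# The file name "abyss_description.html" is a hypothetical placeholder for a
# page captured by the crawler.
#
#   with open("abyss_description.html", "r", encoding="utf-8") as f:
#       soup = BeautifulSoup(f.read(), "html.parser")
#   row = abyssForums_description_parser(soup)
#   topic, users, messages = row[0], row[1], row[6]
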
# This is the method to parse the Listing Pages (one page with many topics)
def abyssForums_listing_parser(soup: BeautifulSoup):

    nm = 0                  # this variable should receive the number of topics
    forum = "AbyssForum"    # 0 *forum name
    board = "-1"            # 1 board name (the level above the topic in the forum categorization tree.
                            # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []             # 2 all authors of each topic
    topic = []              # 3 all topics
    views = []              # 4 number of views of each topic
    posts = []              # 5 number of posts of each topic
    href = []               # 6 this variable should receive all cleaned urls (we will use this to do the merge
                            # between Listing and Description pages)
    addDate = []            # 7 when the topic was created (difficult to find)
    image_author = []       # 8 all author avatars used in each topic

    # Finding the board
    board = soup.find("h2", {"class": "forum-title"}).text
    board = cleanString(board.strip())

    type_of_posts = soup.find_all("li", {"class": re.compile(r"row bg\d")})
    for literature in type_of_posts:
        title_of_post = literature.find("a", {"class": "topictitle"}).text
        title_of_post = cleanString(title_of_post)
        topic.append(title_of_post)

        user = literature.find("div", {"class": "topic-poster responsive-hide left-box"}).find("a", {"class": "username"}).text
        author.append(user)

        num_post = literature.find("dd", {"class": "posts"}).text.replace("Replies", "").strip()
        posts.append(num_post)

        num_view = literature.find("dd", {"class": "views"}).text.replace("Views", "").strip()
        views.append(num_view)

        # if int(num_post) != 0: join the last user who posted with the author?
        #     reply = literature.find("dd", {"class": "lastpost"}).find("a", {"class": "username"}).text
        #     user.append(reply)

        date_time_obj = literature.find('time').attrs
        date_str = date_time_obj['datetime'][0:10]
        time_str = date_time_obj['datetime'][11:19]
        date_added = datetime.strptime(date_str + " " + time_str, '%Y-%m-%d %H:%M:%S')
        addDate.append(date_added)

        listing_href = literature.find("a", {"class": "topictitle"}).get("href")
        href.append(listing_href)

        image_author.append("-1")

    nm = len(topic)
    return organizeTopics(
        forum=forum,
        nm=nm,
        board=board,
        author=author,
        topic=topic,
        views=views,
        posts=posts,
        href=href,
        addDate=addDate,
        image_author=image_author
    )

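# A usage sketch for the listing parser (an addition for illustration): the
# return value comes from organizeTopics in Forums.Utilities.utilities, so its
# exact shape depends on that helper. The file name is a hypothetical placeholder.
#
#   with open("abyss_listing.html", "r", encoding="utf-8") as f:
#       soup = BeautifulSoup(f.read(), "html.parser")
#   organized = abyssForums_listing_parser(soup)
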
# This is the method returning all links that should be visited by the Crawler
def abyssForum_links_parser(soup):

    href = []
    # print(soup.find('table', {"class": "tborder clear"}).find(
    #     'tbody').find_all('tr', {"class": "inline_row"}))
    listing = soup.find_all('dl', {"class": "row-item topic_read"})

    for a in listing:
        link = a.find('div', {"class": "list-inner"}).find('a').get('href')
        href.append(link)

    return href
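
# Minimal smoke-test sketch (an addition, not part of the original module):
# parse the topic links out of a locally saved listing page. The file name
# "AbyssForum_listing.html" is a hypothetical placeholder for a page captured
# by the crawler.
if __name__ == "__main__":
    with open("AbyssForum_listing.html", "r", encoding="utf-8") as f:
        listing_soup = BeautifulSoup(f.read(), "html.parser")
    for link in abyssForum_links_parser(listing_soup):
        print(link)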