# This file is based on the calsyslab project
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import date
from datetime import datetime  # needed for the datetime.strptime calls below
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def altenens_description_parser(soup):

    topic = "-1"     # 0 *topic name
    user = []        # 1 *all users of each post
    status = []      # 2 all users' authority in each post, such as (adm, member, dangerous)
    reputation = []  # 3 all users' karma in each post (usually found as a number)
    interest = []    # 4 all users' interests in each post
    sign = []        # 5 all users' signatures in each post (usually a standard message after the content of the post)
    post = []        # 6 all messages of each post
    feedback = []    # 7 all feedback on each vendor (this was found in just one forum, as a number)
    addDate = []     # 8 all dates of each post

    topic = soup.find("h1", {"class": "p-title-value"}).text
    topic = cleanString(topic.strip())

    body = soup.find('div', {"class": "block-container lbContainer"})
    iposts = body.find_all('article', {"class": "message message--post js-post js-inlineModContainer"})

    for ipost in iposts:

        author = ipost.find('h4', {"class": "message-name"}).text
        user.append(cleanString(author.strip()))

        stat = ipost.find('h5', {"class": "userTitle message-userTitle"}).text
        status.append(cleanString(stat.strip()))

        bar = ipost.find('div', {"class": "xtr-progress-bar"})
        if bar is not None:
            rep = bar.find('p').get('data-value')
        else:
            rep = "-1"
        reputation.append(cleanString(rep))

        interest.append("-1")

        signature = ipost.find('aside', {"class": "message-signature"})
        if signature is not None:
            signature = signature.text.strip()
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        inner = ipost.find('div', {"class": "bbWrapper"}).find(text=True, recursive=False)
        if inner is not None:
            inner = inner.strip()
        else:
            inner = ""  # cannot use -1 because the post is hidden unless you reply
        post.append(cleanString(inner))

        feedback.append("-1")

        dt = ipost.find('time', {"class": "u-dt"}).get('datetime')
        date_time_obj = datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S%z')
        addDate.append(date_time_obj)

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row
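
# A minimal offline sketch of how the description parser above might be
# exercised. The file name is hypothetical: in the real project the crawler
# supplies the saved pages, so this helper is illustration only.
def _example_parse_description(path='altenens_topic.html'):
    with open(path, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f.read(), 'html.parser')
    # returns (topic, user, status, reputation, interest, sign, post, feedback, addDate)
    return altenens_description_parser(soup)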

# This is the method to parse the Listing Pages (one page with many topics)
def altenens_listing_parser(soup):

    nm = 0              # *this variable should receive the number of topics
    forum = "Altenens"  # 0 *forum name
    board = "-1"        # 1 *board name (the level above the topic in the forum categorization tree.
                        # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []         # 2 *all authors of each topic
    topic = []          # 3 *all topics
    views = []          # 4 number of views of each topic
    posts = []          # 5 number of posts of each topic
    href = []           # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                        # Listing and Description pages)
    addDate = []        # 7 when the topic was created (difficult to find)

    board = soup.find('h1', {"class": "p-title-value"}).text
    board = cleanString(board.strip())

    itopics = soup.find_all('div', {"class": "structItem-cell structItem-cell--main"})
    nm = len(itopics)

    for itopic in itopics:

        topics = itopic.find('div', {"class": "structItem-title"}).text
        topic.append(cleanString(topics.strip()))

        link = itopic.find('a').get('href')
        href.append(link)

        user = itopic.find('ul', {"class": "structItem-parts"}).find('a').text
        author.append(cleanString(user.strip()))

        dt = itopic.find('time', {"class": "u-dt"}).get('datetime')
        date_time_obj = datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S%z')
        addDate.append(date_time_obj)

    itopics = soup.find_all('div', {"class": "structItem-cell structItem-cell--meta"})
    for itopic in itopics:

        # counts may be abbreviated (e.g. "1.2K"), so expand the 'K' suffix into
        # a plain number instead of naively replacing 'K' with '000'
        nposts = itopic.find('dl', {"class": "pairs pairs--justified"}).text
        nposts = nposts.replace('Replies', '').strip()
        if 'K' in nposts:
            nposts = str(int(float(nposts.replace('K', '')) * 1000))
        posts.append(cleanString(nposts))

        nviews = itopic.find('dl', {"class": "pairs pairs--justified structItem-minor"}).text
        nviews = nviews.replace('Views', '').strip()
        if 'K' in nviews:
            nviews = str(int(float(nviews.replace('K', '')) * 1000))
        views.append(cleanString(nviews))

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate)
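
# The same kind of sketch for the listing parser, again with a hypothetical
# file name; organizeTopics comes from the star import of
# Forums.Utilities.utilities above.
def _example_parse_listing(path='altenens_listing.html'):
    with open(path, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f.read(), 'html.parser')
    return altenens_listing_parser(soup)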

def altenens_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.find_all('div', {"class": "structItem-cell structItem-cell--main"})
    for a in listing:
        link = a.find('a', {"class": ""}).get('href')
        href.append(link)

    return href
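
# A hedged end-to-end sketch: collect the topic links from a saved listing
# page. The file name is an assumption for illustration; the real project's
# crawler fetches the live pages itself and then hands each one to the
# parsers above.
if __name__ == '__main__':
    with open('altenens_listing.html', 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')
    for link in altenens_links_parser(listing_soup):
        print(link)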