# This parser is based on the calsyslab project.
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import datetime

import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def altenens_description_parser(soup):

    topic = "-1"        # 0 *topic name
    user = []           # 1 *all users of each post
    status = []         # 2 all users' authority in each post, such as (adm, member, dangerous)
    reputation = []     # 3 all users' karma in each post (usually found as a number)
    interest = []       # 4 all users' interests in each post
    sign = []           # 5 all users' signatures in each post (usually a standard message after the content of the post)
    post = []           # 6 all messages of each post
    feedback = []       # 7 all feedback of each vendor (this was found in just one forum and in a number format)
    addDate = []        # 8 all dates of each post
    image_user = []     # 9 all user avatars of each post
    image_post = []     # 10 all first images of each post

    etopic = soup.find("h1", {"class": "p-title-value"})
    if etopic is not None:
        topic = etopic.text
        topic = cleanString(topic.strip())

    body = soup.find('div', {"class": "block-container lbContainer"})
    iposts = body.find_all('article', {"class": "message message--post js-post js-inlineModContainer"})

    for ipost in iposts:

        author = ipost.find('h4', {"class": "message-name"}).text
        user.append(cleanString(author.strip()))

        stat = ipost.find('h5', {"class": "userTitle message-userTitle"}).text
        status.append(cleanString(stat.strip()))

        bar = ipost.find('div', {"class": "xtr-progress-bar"})
        if bar is not None:
            rep = bar.find('p').get('data-value')
        else:
            rep = "-1"
        reputation.append(cleanString(rep))

        interest.append("-1")

        signature = ipost.find('aside', {"class": "message-signature"})
        if signature is not None:
            signature = signature.text.strip()
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        inner = ipost.find('div', {"class": "bbWrapper"}).find(text=True, recursive=False)
        if inner is not None:
            inner = inner.strip()
        else:
            inner = ""  # cannot use -1 because the post is hidden unless you reply
        post.append(cleanString(inner))

        feedback.append("-1")

        dt = ipost.find('time', {"class": "u-dt"}).get('datetime')
        date_time_obj = datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S%z')
        addDate.append(date_time_obj)

        img = ipost.find('div', {"class": "message-avatar-wrapper"}).find('img')
        if img is not None:
            img = img.get('src').split('base64,')[-1]
        else:
            img = "-1"
        image_user.append(img)

        image_post.append("-1")

    # Populate the final variable (this should be a list with all the fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results
    return row
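
# A minimal usage sketch (illustrative only; the file name below is hypothetical,
# since in the original pipeline the crawler supplies the saved pages itself):
#
#   from bs4 import BeautifulSoup
#   with open('altenens_topic.html', encoding='utf-8') as f:
#       soup = BeautifulSoup(f.read(), 'html.parser')
#   row = altenens_description_parser(soup)
#   topic, users = row[0], row[1]   # indices follow the field comments above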
# This is the method to parse the Listing Pages (one page with many topics)
def altenens_listing_parser(soup):

    nm = 0                  # *this variable should receive the number of topics
    forum = "Altenens"      # 0 *forum name
    board = "-1"            # 1 *board name (the previous level of the topic in the forum categorization tree.
                            #   For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []             # 2 *all authors of each topic
    topic = []              # 3 *all topics
    views = []              # 4 number of views of each topic
    posts = []              # 5 number of posts of each topic
    href = []               # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                            #   Listing and Description pages)
    addDate = []            # 7 when the topic was created (difficult to find)
    image_author = []       # 8 all author avatars used in each topic

    board = soup.find('h1', {"class": "p-title-value"}).text
    board = cleanString(board.strip())

    regex = re.compile('structItem structItem--thread.*')
    itopics = soup.find_all('div', {"class": regex})
    nm = len(itopics)

    for itopic in itopics:

        topics = itopic.find('div', {"class": "structItem-title"}).text
        topic.append(cleanString(topics.strip()))

        author_icon = itopic.find('a', {"class": "avatar avatar--s"})
        if author_icon is not None:
            author_icon = author_icon.find('img')
            author_icon = author_icon.get('src')
            author_icon = author_icon.split('base64,')[-1]
        else:
            author_icon = "-1"
        image_author.append(author_icon)

        link = itopic.find('div', {"class": "structItem-title"}).find('a').get('href')
        href.append(link)

        user = itopic.find('ul', {"class": "structItem-parts"}).find('a').text
        author.append(cleanString(user.strip()))

        dt = itopic.find('time', {"class": "u-dt"}).get('datetime')
        date_time_obj = datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S%z')
        addDate.append(date_time_obj)

        nposts = itopic.find('dl', {"class": "pairs pairs--justified"}).text
        nposts = nposts.replace('Replies', '')
        nposts = nposts.replace('K', '000')  # rough expansion: '5K' becomes '5000'
        posts.append(cleanString(nposts))

        nviews = itopic.find('dl', {"class": "pairs pairs--justified structItem-minor"}).text
        nviews = nviews.replace('Views', '')
        nviews = nviews.replace('K', '000')  # rough expansion: '5K' becomes '5000'
        views.append(cleanString(nviews))

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate, image_author)
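
# Illustrative helper (an addition for clarity, not used by the parsers above):
# the simple 'K' -> '000' replacement turns '1.2K' into '1.2000', so a numeric
# parse like this is safer if fractional counts ever appear on the site.
def parse_thread_count(text):
    """Convert XenForo-style counts such as '742', '1,234' or '1.2K' to an int."""
    text = text.strip().replace(',', '')
    if text.upper().endswith('K'):
        return int(float(text[:-1]) * 1000)
    return int(text)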
def altenens_links_parser(soup):

    # Returning all the links that should be visited by the Crawler
    href = []

    listing = soup.find_all('div', {"class": "structItem-cell structItem-cell--main"})

    for a in listing:
        link = a.find('a', {"class": ""}).get('href')
        href.append(link)

    return href
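
# A minimal smoke test (an assumption for illustration: the file name is
# hypothetical, and running this module standalone requires the Forums package
# on the path for the utilities import above).
if __name__ == '__main__':
    with open('altenens_listing.html', encoding='utf-8') as f:
        test_soup = BeautifulSoup(f.read(), 'html.parser')
    print(altenens_links_parser(test_soup))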