This parser is based on the calsyslab project.

__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import date
from datetime import datetime   # needed for the strptime calls below
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def cryptBB_description_parser(soup):

    # Fields to be parsed
    topic = "-1"       # topic name
    user = []          # all users of each post
    addDate = []       # all dates of each post
    feedback = []      # all feedback of each vendor (found in just one forum, as a number)
    status = []        # each user's authority in each post, such as (admin, member, dangerous)
    reputation = []    # each user's karma in each post (usually found as a number)
    sign = []          # each user's signature in each post (usually a standard message after the post content)
    post = []          # all messages of each post
    interest = []      # each user's interests in each post

    # Finding the topic (should be just one coming from the Listing Page)
    li = soup.find("td", {"class": "thead"}).find('strong')
    topic = li.text
    topic = re.sub(r"\[\w*\]", '', topic)   # raw string avoids invalid-escape warnings
    topic = topic.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())
    # Finding the repeated tag that corresponds to the listing of posts
    # try:
    posts = soup.find('table', {"class": "tborder tfixed clear"}).find('td', {"id": "posts_container"}).find_all(
        'div', {"class": "post"})

    # For each message (post), get all the fields we are interested in:
    for ipost in posts:

        # Finding the first level of the HTML page
        post_wrapper = ipost.find('span', {"class": "largetext"})

        # Finding the author (user) of the post
        author = post_wrapper.text.strip()
        user.append(cleanString(author))  # Remember to clean the problematic characters

        # Finding the status of the author
        smalltext = ipost.find('div', {"class": "post_author"})

        '''
        # Testing here two possibilities to find this status and combine them
        if ipost.find('div', {"class": "deleted_post_author"}):
            status.append(-1)
            interest.append(-1)
            reputation.append(-1)
            addDate.append(-1)
            post.append("THIS POST HAS BEEN REMOVED!")
            sign.append(-1)
            feedback.append(-1)
            continue
        '''
        # CryptBB does have membergroup and postgroup
        membergroup = smalltext.find('div', {"class": "profile-rank"})
        postgroup = smalltext.find('div', {"class": "postgroup"})
        if membergroup is not None:
            membergroup = membergroup.text.strip()
            if postgroup is not None:
                postgroup = postgroup.text.strip()
                membergroup = membergroup + " - " + postgroup
        else:
            if postgroup is not None:
                membergroup = postgroup.text.strip()
            else:
                membergroup = "-1"
        status.append(cleanString(membergroup))

        # Finding the interest of the author
        # CryptBB does not have blurb
        blurb = smalltext.find('li', {"class": "blurb"})
        if blurb is not None:
            blurb = blurb.text.strip()
        else:
            blurb = "-1"
        interest.append(cleanString(blurb))

        # Finding the reputation of the user
        # CryptBB does have reputation
        author_stats = smalltext.find('div', {"class": "author_statistics"})
        karma = author_stats.find('strong')
        if karma is not None:
            karma = karma.text
            karma = karma.replace("Community Rating: ", "")
            karma = karma.replace("Karma: ", "")
            karma = karma.strip()
        else:
            karma = "-1"
        reputation.append(cleanString(karma))
        # Getting here another good tag to find the post date, post content and user's signature
        postarea = ipost.find('div', {"class": "post_content"})

        dt = postarea.find('span', {"class": "post_date"}).text
        # dt = dt.strip().split()
        dt = dt.strip()
        day = date.today()
        if "Yesterday" in dt:
            yesterday = day - timedelta(days=1)
            yesterday = yesterday.strftime('%m-%d-%Y')
            stime = dt.replace('Yesterday,', '').strip()
            date_time_obj = yesterday + ', ' + stime
            date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
        elif "hours ago" in dt:
            day = day.strftime('%m-%d-%Y')
            date_time_obj = postarea.find('span', {"class": "post_date"}).find('span')['title']
            date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
        else:
            date_time_obj = datetime.strptime(dt, '%m-%d-%Y, %I:%M %p')
        stime = date_time_obj.strftime('%b %d, %Y')
        sdate = date_time_obj.strftime('%I:%M %p')
        addDate.append(date_time_obj)

        # Finding the post
        inner = postarea.find('div', {"class": "post_body scaleimages"})
        inner = inner.text.strip()
        post.append(cleanString(inner))

        # Finding the user's signature
        # signature = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "moderatorbar"}).find('div', {"class": "signature"})
        signature = ipost.find('div', {"class": "signature scaleimages"})
        if signature is not None:
            signature = signature.text.strip()
            # print(signature)
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        # As no information about the user's feedback was found, just assign "-1" to the variable
        feedback.append("-1")
    '''
    except:
        if soup.find('td', {"class": "trow1"}).text == " You do not have permission to access this page. ":
            user.append("-1")
            status.append(-1)
            interest.append(-1)
            reputation.append(-1)
            addDate.append(-1)
            post.append("NO ACCESS TO THIS PAGE!")
            sign.append(-1)
            feedback.append(-1)
    '''

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row
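

# A hypothetical example of the tuple returned above (the values are illustrative only,
# not taken from a real page): topic is a single string, and every other field is a
# per-post list, so index i of each list describes post i of the thread.
#
#   ('Free OpenVPN configs',                      # topic
#    ['admin', 'helium'],                         # user
#    ['Administrator', 'Member'],                 # status
#    ['154', '3'],                                # reputation
#    ['-1', '-1'],                                # interest
#    ['-1', 'my signature'],                      # sign
#    ['First post text', 'Reply text'],           # post
#    ['-1', '-1'],                                # feedback
#    [datetime(2023, 1, 5, 14, 30), datetime(2023, 1, 6, 9, 0)])   # addDate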
# This is the method to parse the Listing Pages (one page with many topics)
def cryptBB_listing_parser(soup):

    board = "-1"    # board name (the previous level of the topic in the forum categorization tree.
                    # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    nm = 0          # this variable should receive the number of topics
    topic = []      # all topics
    author = []     # all authors of each topic
    views = []      # number of views of each topic
    posts = []      # number of posts of each topic
    addDate = []    # when the topic was created (difficult to find)
    href = []       # this variable should receive all cleaned urls (we will use this to do the merge between
                    # Listing and Description pages)

    # Finding the board (should be just one)
    board = soup.find('span', {"class": "active"}).text
    board = cleanString(board.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find_all('tr', {"class": "inline_row"})

    for itopic in itopics:

        # For each topic found, the structure holding the rest of the information can be of two types,
        # so we test both of them in order not to miss any topic

        # Adding the topic to the topic list
        try:
            topics = itopic.find('span', {"class": "subject_old"}).find('a').text
        except:
            topics = itopic.find('span', {"class": "subject_new"}).find('a').text
        topics = re.sub(r"\[\w*\]", '', topics)
        topic.append(cleanString(topics))

        # Counting how many topics we have found so far
        nm = len(topic)

        # Adding the url to the list of urls
        try:
            link = itopic.find('span', {"class": "subject_old"}).find('a').get('href')
        except:
            link = itopic.find('span', {"class": "subject_new"}).find('a').get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the author of the topic
        ps = itopic.find('div', {"class": "author smalltext"}).find('a').text
        user = ps.strip()
        author.append(cleanString(user))

        # Finding the number of replies
        columns = itopic.findChildren('td', recursive=False)
        replies = columns[3].text
        posts.append(cleanString(replies))

        # Finding the number of views
        tview = columns[4].text
        views.append(cleanString(tview))

        # As no information about when the topic was added is available, just assign "-1" to the variable
        addDate.append("-1")

    return organizeTopics("CryptBB", nm, topic, board, author, views, posts, href, addDate)

def altenens_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.find('div', {"class": "structItemContainer-group js-threadList"}).find_all(
        'div', {"class": "structItem structItem--thread is-unread js-inlineModContainer js-threadListItem-1843963"})

    for a in listing:
        link = a.find('div', {"class": "structItem-title"}).find('a').get('href')
        href.append(link)

    return href
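

# A minimal usage sketch (illustrative only: the file name "cryptbb_topic.html" is an
# assumption; in the real pipeline the crawler supplies the downloaded pages to these parsers):
if __name__ == '__main__':
    with open('cryptbb_topic.html', 'r', encoding='utf-8') as f:
        page = BeautifulSoup(f.read(), 'html.parser')
    print(cryptBB_description_parser(page))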