__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
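# Note: this star import is expected to supply the helpers used below
# (cleanString, cleanLink, organizeTopics, and convertDate in the commented-out code).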

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def helium_description_parser(soup):
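    """
    Parse one Helium topic (Description) page.

    :param soup: BeautifulSoup tree of the fetched topic page
    :return: tuple with the fields scraped from every post in the topic
    """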

    # Fields to be parsed

    topic = "-1"     # topic name
    user = []        # all users of each post
    addDate = []     # all dates of each post
    feedback = []    # all feedback of each vendor (this was found in just one forum, in a number format)
    status = []      # each user's authority in each post, such as (adm, member, dangerous)
    reputation = []  # each user's karma in each post (usually found as a number)
    sign = []        # each user's signature in each post (usually a standard message after the post content)
    post = []        # all messages of each post
    interest = []    # each user's interests in each post

    # Finding the topic (should be just one coming from the Listing Page)

    li = soup.find("h4", {"class": "text-truncated"})
    topic = li.text
    topic = topic.replace("Topic:", "")
    topic = topic.replace("Post Reply", "")
    topic = topic.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())

    # Finding the repeated tag that corresponds to the listing of posts

    posts = soup.findAll('div', {"id": "a9"})
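    # (Helium appears to reuse the id "a9" on every post container, which is
    # why findAll is used here instead of find.)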

    # For each message (post), get all the fields we are interested in:

    for ipost in posts:

        # Finding the first level of the HTML page

        # Finding the author (user) of the post

        heading = ipost.find('div', {"class": "panel-heading"})
        title = heading.find('div', {"class": "panel-title"}).text
        author = title.replace("User:", "")
        author = author.strip()
        user.append(cleanString(author))  # Remember to clean the problematic characters

        # Finding the status of the author
        # Testing two possible places for this status and combining them
        # Helium does not have membergroup and postgroup

        membergroup = heading.find('li', {"class": "membergroup"})
        postgroup = heading.find('li', {"class": "postgroup"})
        if membergroup is not None:
            membergroup = membergroup.text.strip()
            if postgroup is not None:
                postgroup = postgroup.text.strip()
                membergroup = membergroup + " - " + postgroup
        else:
            if postgroup is not None:
                membergroup = postgroup.text.strip()
            else:
                membergroup = "-1"
        status.append(cleanString(membergroup))

        # Finding the interest of the author
        # Helium does not have blurb

        blurb = heading.find('li', {"class": "blurb"})
        if blurb is not None:
            blurb = blurb.text.strip()
        else:
            blurb = "-1"
        interest.append(cleanString(blurb))

        # Finding the reputation of the user
        # Helium does not have karma

        karma = heading.find('li', {"class": "karma"})
        if karma is not None:
            karma = karma.text
            karma = karma.replace("Community Rating: ", "")
            karma = karma.replace("Karma: ", "")
            karma = karma.strip()
        else:
            karma = "-1"
        reputation.append(cleanString(karma))

        # Getting here another good tag to find the post date, post content and users' signature

        postarea = ipost.find('div', {"class": "content_body"})

        # Finding the date of the post
        # Helium does not have date

        addDate.append("-1")

        # dt = ipost.find('p', {"class": "author"}).text.split('»')[1]
        # # dt = dt.strip().split()
        # dt = dt.strip()
        # date_time_obj = datetime.strptime(dt, '%a %b %d, %Y %I:%M %p')
        # stime = date_time_obj.strftime('%a %b %d, %Y')
        # sdate = date_time_obj.strftime('%I:%M %p')
        # addDate.append(date_time_obj)

        # smalltext = postarea.find('div', {"class": "flow_hidden"}).find('div', {"class": "keyinfo"})\
        #     .find('div', {"class": "smalltext"})
        # sdatetime = smalltext.text
        # sdatetime = sdatetime.replace(u"\xab", "")  # Removing unnecessary characters
        # sdatetime = sdatetime.replace(u"\xbb", "")  # Removing unnecessary characters
        # sdatetime = sdatetime.split("on: ")         # Removing unnecessary characters
        # sdatetime = sdatetime[1].strip()
        # stime = sdatetime[:-12:-1]                  # Finding the time of the post
        # stime = stime[::-1]
        # sdate = sdatetime.replace(stime, "")        # Finding the date of the post
        # sdate = sdate.replace(",", "")
        # sdate = sdate.strip()

        # Convert the date of the post, which can appear as "12 February 2016", "today", or "yesterday".
        # We need a date format here such as "mm/dd/yyyy"
        # addDate.append(convertDate(sdate, "english", crawlerDate) + " " + stime)
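        # (If the commented-out block above is re-enabled, `from datetime import datetime`
        # would also be needed at the top of this file.)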

        # Finding the post

        paragraphs = postarea.find_all('p')
        p = ""
        for paragraph in paragraphs:
            p += paragraph.text.strip() + " "
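        # Removing any quoted text from the post body, so only the author's
        # own words remain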
        quote = postarea.find('div', {"class": "standard_quote"})
        if quote is not None:
            q = quote.text.strip()
            p = p.replace(q, "")  # str.replace returns a new string, so reassign it
        post.append(cleanString(p.strip()))

        # Finding the user's signature
        # Helium does not have signature

        # signature = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "moderatorbar"}).find('div', {"class": "signature"})
        signature = ipost.find('div', {"class": "post_wrapper"})
        if signature is not None:
            signature = signature.text.strip()
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        # As no information about the user's feedback was found, just assign "-1" to the variable

        feedback.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)

    row = (topic, post, user, addDate, feedback, status, reputation, sign, interest)

    # Sending the results
    return row
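
# Usage sketch (the file name below is hypothetical; the soup must come from a
# Helium topic page saved by the crawler):
#   soup = BeautifulSoup(open("helium_topic.html"), "html.parser")
#   row = helium_description_parser(soup)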


# This is the method to parse the Listing Pages (one page with many topics)
def helium_listing_parser(soup):
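    """
    Parse one Helium Listing (board) page.

    :param soup: BeautifulSoup tree of the fetched listing page
    :return: the scraped topics, packed by organizeTopics
    """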

    board = "-1"  # board name (the previous level of the topic in the Forum categorization tree.
                  # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)

    nm = 0        # this variable should receive the number of topics
    topic = []    # all topics
    user = []     # all users of each topic
    post = []     # number of posts of each topic
    view = []     # number of views of each topic
    addDate = []  # when the topic was created (difficult to find)
    href = []     # this variable should receive all cleaned urls (we will use this to do the merge between
                  # Listing and Description pages)

    # Finding the board (should be just one)

    parents = soup.find('div', {"class": "col-md-12"}).findAll('li')
    board = parents[1].text + u"->" + parents[2].get('title')
    board = board.replace("\n", "")
    board = cleanString(board.strip())
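    # (parents[1] and parents[2] are assumed to be the second and third
    # breadcrumb items, i.e. the category and the board.)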

    # Finding the repeated tag that corresponds to the listing of topics

    itopics = soup.find('table', {"class": "table"}).find('tbody').findAll('td', {"class": "col-md-8"})
    repliesViews = soup.find('table', {"class": "table"}).find('tbody').findAll('td', {"class": "col-md-2"})

    # Counting how many topics we have found so far

    nm = len(itopics)

    index = 0
    for itopic in itopics:

        # Adding the topic to the topic list

        topics = itopic.find('a').get('title')
        topics = topics.replace(",", "")
        topic.append(cleanString(topics.strip()))

        # Adding the url to the list of urls
        link = itopic.find('a').get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the author of the topic
        author = itopic.find('strong').text
        user.append(cleanString(author.strip()))

        rv = repliesViews[index].find('p').text.split()
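        # (rv is assumed to hold the replies and views counts as its first two
        # whitespace-separated tokens, e.g. "12Replies 340Views"; the exact
        # text format on Helium is an assumption here.)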

        # Finding the number of replies
        posts = rv[0].replace("Replies", "")
        post.append(cleanString(posts.strip()))

        # Finding the number of views
        tview = rv[1].replace("Views", "")
        view.append(cleanString(tview.strip()))

        # If there is no information about when the topic was added, just assign "-1" to the variable
        # dt = itopic.find('div', {"class": "responsive-hide"}).text.split('»')[1]
        # dt = dt.strip()
        # date_time_obj = datetime.strptime(dt, '%a %b %d, %Y %I:%M %p')
        # addDate.append(date_time_obj)
        addDate.append("-1")

        index += 1

    return organizeTopics("Helium", nm, topic, board, view, post, user, addDate, href)


def helium_links_parser(soup):

    # Returning all links that should be visited by the Crawler

    href = []

    listing = soup.find('table', {"class": "table"}).find('tbody').findAll('td', {"class": "col-md-8"})

    for a in listing:
        bae = a.find('a', href=True)
        link = bae['href']
        href.append(link)

    return href
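
# Crawler flow sketch: helium_links_parser collects the topic urls from each
# Listing page, the crawler fetches those urls, and each fetched topic page is
# then handed to helium_description_parser above.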