# Based on the calsyslab project.
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
# datetime is needed to parse the post dates; the star import above may already
# provide it, but we import it explicitly to be safe
from datetime import datetime
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

# This is the method to parse the Description Pages (one page per topic in the Listing Pages).
# It takes the soup object of a description page and parses it for the information it needs,
# storing that information in separate lists that are organized and returned together.
# @param: soup object of the HTML page of a description page
# @return: 'row', which contains a variety of lists that each hold information from the description page
def bestcardingworld_description_parser(soup):

    # Fields to be parsed
    topic = "-1"     # 0 topic name
    user = []        # 1 all users of each post
    addDate = []     # 2 all dates of each post
    feedback = []    # 3 all feedback of each user (found in just one forum, in a number format)
    status = []      # 4 all users' authority in each post, such as (admin, member, dangerous)
    reputation = []  # 5 all users' karma in each post (usually found as a number)
    sign = []        # 6 all users' signatures in each post (usually a standard message after the content of the post)
    post = []        # 7 all messages of each post
    interest = []    # 8 all users' interests in each post
    # Finding the topic (should be just one, coming from the Listing Page)
    li = soup.find("h2", {"class": "topic-title"})
    topic = li.text
    topic = topic.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())

    # Finding the repeated tag that corresponds to the listing of posts
    posts = soup.findAll('div', {"class": "post has-profile bg2"}) + \
            soup.findAll('div', {"class": "post has-profile bg1"})
    # For each message (post), get all the fields we are interested in:
    for ipost in posts:

        # Finding the author (user) of the post
        post_wrapper = ipost.find('a', {"class": "username-coloured"})
        author = post_wrapper.text.strip()
        user.append(cleanString(author))  # Remember to clean the problematic characters
        # Finding the status of the author
        smalltext = ipost.find('dl', {"class": "postprofile"})

        # Testing two possibilities to find this status, and combining them
        # (BestCardingWorld does not have membergroup and postgroup)
        membergroup = smalltext.find('li', {"class": "membergroup"})
        postgroup = smalltext.find('li', {"class": "postgroup"})
        if membergroup != None:
            membergroup = membergroup.text.strip()
            if postgroup != None:
                postgroup = postgroup.text.strip()
                membergroup = membergroup + " - " + postgroup
        else:
            if postgroup != None:
                membergroup = postgroup.text.strip()
            else:
                membergroup = "-1"
        status.append(cleanString(membergroup))
        # Finding the interest of the author
        # (BestCardingWorld does not have blurb)
        blurb = smalltext.find('li', {"class": "blurb"})
        if blurb != None:
            blurb = blurb.text.strip()
        else:
            blurb = "-1"
        interest.append(cleanString(blurb))

        # Finding the reputation of the user
        # (BestCardingWorld does not have karma)
        karma = smalltext.find('li', {"class": "karma"})
        if karma != None:
            karma = karma.text
            karma = karma.replace("Community Rating: ", "")
            karma = karma.replace("Karma: ", "")
            karma = karma.strip()
        else:
            karma = "-1"
        reputation.append(cleanString(karma))
        # Getting another good tag to find the post date, post content and user's signature
        postarea = ipost.find('div', {"class": "inner"})

        # Finding the date of the post: the raw text after the '»' in the author line
        dt = ipost.find('p', {"class": "author"}).text.split('»')[1]
        dt = dt.strip()
        date_time_obj = datetime.strptime(dt, '%a %b %d, %Y %I:%M %p')
        sdate = date_time_obj.strftime('%a %b %d, %Y')
        stime = date_time_obj.strftime('%I:%M %p')
        addDate.append(date_time_obj)
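        # Illustrative example of the parsing above (hypothetical values, not
        # real scraped data):
        #   datetime.strptime('Tue Mar 14, 2023 10:32 pm', '%a %b %d, %Y %I:%M %p')
        #   -> datetime(2023, 3, 14, 22, 32)
        # %p is matched case-insensitively, so both 'pm' and 'PM' parse.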
        # Finding the post content
        inner = postarea.find('div', {"class": "content"})
        inner = inner.text.strip()
        post.append(cleanString(inner))

        # Finding the user's signature
        signature = ipost.find('div', {"class": "post_wrapper"})
        if signature != None:
            signature = signature.text.strip()
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        # As no information about the user's feedback was found, just assign "-1" to the variable
        feedback.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, post, user, addDate, feedback, status, reputation, sign, interest)

    # Sending the results
    return row
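
# Illustrative shape of the tuple returned above, for a topic with two posts
# (all values are hypothetical placeholders, not real scraped data):
#   ('Some topic title',
#    ['first post text', 'second post text'],   # post
#    ['user1', 'user2'],                        # user
#    [datetime(...), datetime(...)],            # addDate
#    ['-1', '-1'],                              # feedback
#    ['Member', '-1'],                          # status
#    ['-1', '-1'],                              # reputation
#    ['signature text', '-1'],                  # sign
#    ['-1', '-1'])                              # interest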

# This is the method to parse the Listing Pages (one page with many topics).
# It takes the soup object of a listing page and parses it for the information it needs,
# storing that information in separate lists that are organized and returned together.
# @param: soup object of the HTML page of a listing page
# @return: 'row' that contains a variety of lists that each hold information from the listing page
def bestcardingworld_listing_parser(soup):

    nm = 0        # this variable should receive the number of topics
    topic = []    # 1 all topics
    board = "-1"  # 2 board name (the previous level of the topic in the forum categorization tree.
                  #   For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    view = []     # 3 number of views of each topic
    post = []     # 4 number of posts of each topic
    user = []     # 5 all users of each topic
    addDate = []  # 6 when the topic was created (difficult to find)
    href = []     # 7 this variable should receive all cleaned urls (we will use this to do the merge
                  #   between Listing and Description pages)
    # Finding the board (should be just one)
    parents = soup.find('ul', {"class": "linklist navlinks"}).findAll('a')
    board = parents[1].text + u"->" + parents[2].text
    board = board.replace(u"\xbb", "")
    board = cleanString(board.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find('ul', {"class": "topiclist topics"}).findAll('div', {"class": "list-inner"})
    replies = soup.find('ul', {"class": "topiclist topics"}).findAll('dd', {"class": "posts"})
    views = soup.find('ul', {"class": "topiclist topics"}).findAll('dd', {"class": "views"})

    index = 0
    for itopic in itopics:

        # Adding the topic to the topic list
        topics = itopic.find('a', {"class": "topictitle"}).text
        topic.append(cleanString(topics))

        # Counting how many topics we have found so far
        nm = len(topic)

        # Adding the url to the list of urls
        link = itopic.find('a', {"class": "topictitle"}).get('href')
        link = cleanLink(link)
        href.append(link)
        # Finding the author of the topic
        ps = itopic.find('div', {"class": "responsive-hide"}).find('a', {"class": "username-coloured"}).text
        author = ps.strip()
        user.append(cleanString(author))

        # Finding the number of replies
        posts = replies[index].text.split()[0]
        posts = posts.strip()
        post.append(cleanString(posts))

        # Finding the number of views
        tview = views[index].text.split()[0]
        tview = tview.strip()
        view.append(cleanString(tview))

        # Finding when the topic was added: the date shown after the '»' in the author line
        dt = itopic.find('div', {"class": "responsive-hide"}).text.split('»')[1]
        dt = dt.strip()
        date_time_obj = datetime.strptime(dt, '%a %b %d, %Y %I:%M %p')
        addDate.append(date_time_obj)

        index += 1

    return organizeTopics("BestCardingWorld", nm, topic, board, view, post, user, addDate, href)
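
# Illustrative values (hypothetical, not real scraped data) for the fields the
# listing parser hands to organizeTopics on a two-topic page:
#   nm = 2
#   board = 'Carding->Tutorials'
#   topic = ['Topic A', 'Topic B']
#   view = ['12', '7'], post = ['3', '0']
#   user = ['user1', 'user2']
#   addDate = [datetime(...), datetime(...)]
#   href = ['/viewtopic.php?t=1', '/viewtopic.php?t=2']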

# This method is called by the crawler to get description links on a listing page.
# @param: soup object of the HTML page of a listing page
# @return: list of description links from a listing page
def bestcardingworld_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.find('div', {"class": "forumbg"}).find('ul', {"class": "topiclist topics"}).findAll('li', {"class": "row bg1"}) + \
              soup.find('div', {"class": "forumbg"}).find('ul', {"class": "topiclist topics"}).findAll('li', {"class": "row bg2"})

    for a in listing:
        bae = a.find('a', {"class": "topictitle"}, href=True)
        link = bae['href']
        href.append(link)

    return href
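
# A minimal smoke test, as a sketch: this block is not part of the original
# crawler, and the file names below are hypothetical placeholders for locally
# saved copies of a listing page and a description page.
if __name__ == '__main__':
    with open('bestcardingworld_listing.html', 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')
    print(bestcardingworld_links_parser(listing_soup))
    print(bestcardingworld_listing_parser(listing_soup))

    with open('bestcardingworld_description.html', 'r', encoding='utf-8') as f:
        description_soup = BeautifulSoup(f.read(), 'html.parser')
    print(bestcardingworld_description_parser(description_soup))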