@@ -1,338 +1,142 @@

__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from typing import List
from Forums.Utilities.utilities import *
from datetime import date
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
from bs4 import BeautifulSoup, ResultSet, Tag


# This is the method to parse the Description Pages (one page to each topic in the Listing Pages)
def HiddenAnswers_description_parser(soup: BeautifulSoup):
    topic: str = "-1"             # topic name
    user: List[str] = []          # all users of each post
    addDate: List[datetime] = []  # all dates of each post
    feedback: List[str] = []      # all feedback of each vendor (this was found in just one forum and with a number format)
    status: List[str] = []        # all users' authority in each post, such as (adm, member, dangerous)
    reputation: List[str] = []    # all users' karma in each post (usually found as a number)
    sign: List[str] = []          # all users' signatures in each post (usually a standard message after the content of the post)
    post: List[str] = []          # all messages of each post
    interest: List[str] = []      # all users' interests in each post

    # Fields to be parsed
    topic = "-1"       # topic name
    user = []          # all users of each post
    addDate = []       # all dates of each post
    feedback = []      # all feedback of each vendor (this was found in just one forum and with a number format)
    status = []        # all users' authority in each post, such as (adm, member, dangerous)
    reputation = []    # all users' karma in each post (usually found as a number)
    sign = []          # all users' signatures in each post (usually a standard message after the content of the post)
    post = []          # all messages of each post
    interest = []      # all users' interests in each post

    # Finding the topic (should be just one coming from the Listing Page)
    li = soup.find("td", {"class": "thead"}).find('strong')
    li = soup.find("h1").find("span", {"itemprop": "name"})
    topic = li.text
    topic = re.sub(r"\[\w*\]", '', topic)  # strip bracketed tags (e.g. "[SOLVED]") from the title

    topic = topic.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())
    print(topic)

    # Finding the repeated tag that corresponds to the listing of posts

    # posts = soup.find("form", {"name": "quickModForm"}).findAll('div', {"class": "windowbg"}) + \
    #         soup.find("form", {"name": "quickModForm"}).findAll('div', {"class": "windowbg2"})

    try:
        posts = soup.find('table', {"class": "tborder tfixed clear"}).find('td', {"id": "posts_container"}).find_all(
            'div', {"class": "post"})
        # print(len(posts))

        # For each message (post), get all the fields we are interested in:
        for ipost in posts:

            # Finding the first level of the HTML page
            # post_wrapper = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "poster"})
            post_wrapper = ipost.find('span', {"class": "largetext"})

            # Finding the author (user) of the post
            # author = post_wrapper.find('h4')
            author = post_wrapper.text.strip()
            # print("author " + author)
            user.append(cleanString(author))  # Remember to clean the problematic characters

            # Finding the status of the author
            smalltext = ipost.find('div', {"class": "post_author"})

            # Testing here two possibilities to find this status and combine them
            if ipost.find('div', {"class": "deleted_post_author"}):
                status.append(-1)
                interest.append(-1)
                reputation.append(-1)
                addDate.append(-1)
                post.append("THIS POST HAS BEEN REMOVED!")
                sign.append(-1)
                feedback.append(-1)
                continue

            # CryptBB does have membergroup and postgroup
            membergroup = smalltext.find('div', {"class": "profile-rank"})
            postgroup = smalltext.find('div', {"class": "postgroup"})
            if membergroup != None:
                membergroup = membergroup.text.strip()
                if postgroup != None:
                    postgroup = postgroup.text.strip()
                    membergroup = membergroup + " - " + postgroup
            else:
                if postgroup != None:
                    membergroup = postgroup.text.strip()
                else:
                    membergroup = "-1"

            status.append(cleanString(membergroup))
            # print("status " + cleanString(membergroup))

            # Finding the interest of the author
            # CryptBB does not have a blurb
            blurb = smalltext.find('li', {"class": "blurb"})
            if blurb != None:
                blurb = blurb.text.strip()
            else:
                blurb = "-1"
            interest.append(cleanString(blurb))

            # Finding the reputation of the user
            # CryptBB does have reputation
            author_stats = smalltext.find('div', {"class": "author_statistics"})
            karma = author_stats.find('strong')
            if karma != None:
                karma = karma.text
                karma = karma.replace("Community Rating: ", "")
                karma = karma.replace("Karma: ", "")
                karma = karma.strip()
            else:
                karma = "-1"
            reputation.append(cleanString(karma))
            # print("karma " + cleanString(karma))

            # Getting here another good tag to find the post date, post content and users' signature
            postarea = ipost.find('div', {"class": "post_content"})

            dt = postarea.find('span', {"class": "post_date"}).text
            # dt = dt.strip().split()
            dt = dt.strip()
            day = date.today()
            if "Yesterday" in dt:
                yesterday = day - timedelta(days=1)
                yesterday = yesterday.strftime('%m-%d-%Y')
                stime = dt.replace('Yesterday,', '').strip()
                date_time_obj = yesterday + ', ' + stime
                date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
            elif "hours ago" in dt:
                day = day.strftime('%m-%d-%Y')
                date_time_obj = postarea.find('span', {"class": "post_date"}).find('span')['title']
                date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
            else:
                date_time_obj = datetime.strptime(dt, '%m-%d-%Y, %I:%M %p')
            stime = date_time_obj.strftime('%b %d, %Y')
            sdate = date_time_obj.strftime('%I:%M %p')
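            # The date formats handled above, roughly (illustrative values, assumed rather than taken from the site):
            #   "Yesterday, 10:32 PM"   -> prefix with yesterday's date, then parse '%m-%d-%Y, %I:%M %p'
            #   "5 hours ago"           -> read the absolute timestamp from the nested span's 'title' attribute
            #   "07-19-2021, 10:32 PM"  -> parsed directly with '%m-%d-%Y, %I:%M %p'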

            addDate.append(date_time_obj)
            # print("date " + str(date_time_obj))

            # Finding the date of the post
            # date_time_obj = datetime.strptime(dt, '%a %b %d, %Y %I:%M %p')
            # smalltext = postarea.find('div', {"class": "flow_hidden"}).find('div', {"class": "keyinfo"})\
            #             .find('div', {"class": "smalltext"})
            # sdatetime = smalltext.text
            # sdatetime = sdatetime.replace(u"\xab", "")  # Removing unnecessary characters
            # sdatetime = sdatetime.replace(u"\xbb", "")  # Removing unnecessary characters
            # sdatetime = sdatetime.split("on: ")         # Removing unnecessary characters
            # sdatetime = sdatetime[1].strip()
            # stime = sdatetime[:-12:-1]                  # Finding the time of the post
            # stime = stime[::-1]
            # sdate = sdatetime.replace(stime, "")        # Finding the date of the post
            # sdate = sdate.replace(",", "")
            # sdate = sdate.strip()

            # Convert the date of the post, which can be given as "12 February 2016", "today" or "yesterday".
            # We need a date format here as "mm/dd/yyyy".
            # addDate.append(convertDate(sdate, "english", crawlerDate) + " " + stime)

            # Finding the post
            inner = postarea.find('div', {"class": "post_body scaleimages"})
            inner = inner.text.strip()
            # print(inner)
            post.append(cleanString(inner))

            # Finding the user's signature
            # signature = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "moderatorbar"}).find('div', {"class": "signature"})
            signature = ipost.find('div', {"class": "signature scaleimages"})
            if signature != None:
                signature = signature.text.strip()
                # print(signature)
            else:
                signature = "-1"
            sign.append(cleanString(signature))

            # As no information about the user's feedback was found, just assign "-1" to the variable
            feedback.append("-1")

    except:
        if soup.find('td', {"class": "trow1"}).text == " You do not have permission to access this page. ":
            user.append("-1")
            status.append(-1)
            interest.append(-1)
            reputation.append(-1)
            addDate.append(-1)
            post.append("NO ACCESS TO THIS PAGE!")
            sign.append(-1)
            feedback.append(-1)

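    # Parsing the question itself: on HiddenAnswers the Description Page holds a single question
    # followed by a list of answers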
    question: Tag = soup.find("div", {"class": "qa-part-q-view"})

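    # Finding the author (user) of the question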
    question_user = question.find("span", {"class": "qa-q-view-who-data"}).text
    user.append(cleanString(question_user.strip()))

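    # Finding the date the question was added; the datetime attribute carries a timezone offset
    # after "+", which is stripped before parsing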
    question_time = question.find("span", {"class": "qa-q-view-when-data"}).find("time").get("datetime")
    datetime_string = question_time.split("+")[0]
    datetime_obj = datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S")
    addDate.append(datetime_obj)

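    # Finding the status (user title) of the question's author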
    question_user_status = question.find("span", {"class": "qa-q-view-who-title"}).text
    status.append(cleanString(question_user_status.strip()))

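    # Finding the reputation (points) of the question's author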
    question_user_karma = question.find("span", {"class": "qa-q-view-who-points-data"}).text
    # Convert karma to a pure numerical string, e.g. "4.3k" becomes "4300.0"
    if question_user_karma.find("k") > -1:
        question_user_karma = str(float(question_user_karma.replace("k", "")) * 1000)
    reputation.append(cleanString(question_user_karma.strip()))

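    # Finding the content (body) of the question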
    question_content = question.find("div", {"class": "qa-q-view-content qa-post-content"}).text
    post.append(cleanString(question_content.strip()))

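    # As no information about feedback, signature or interest was found for the question,
    # just assign "-1" to those variables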
    feedback.append("-1")
    sign.append("-1")
    interest.append("-1")

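    # Finding the repeated tag that corresponds to the list of answers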
    answer_list: ResultSet[Tag] = soup.find("div", {"class": "qa-a-list"}).find_all("div", {"class": "qa-a-list-item"})

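    # For each answer (reply), get the same fields we collected for the question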
    for replies in answer_list:
        user_name = replies.find("span", {"class": "qa-a-item-who-data"}).text
        user.append(cleanString(user_name.strip()))

        date_added = replies.find("span", {"class": "qa-a-item-when"}).find("time", {"itemprop": "dateCreated"}).get('datetime')
        date_string = date_added.split("+")[0]
        datetime_obj = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S")
        addDate.append(datetime_obj)

        post_data = replies.find("div", {"class": "qa-a-item-content qa-post-content"}).find("div", {"itemprop": "text"}).text
        post.append(cleanString(post_data.strip()))

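        # Finding the status (user title) of the answer's author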
        user_reputations = replies.find("span", {"class": "qa-a-item-who-title"}).text
        status.append(cleanString(user_reputations.strip()))

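        # Finding the reputation (points) of the answer's author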
        karma = replies.find("span", {"class": "qa-a-item-who-points-data"}).text
        # Convert karma to a pure numerical string, e.g. "4.3k" becomes "4300.0"
        if karma.find("k") > -1:
            karma = str(float(karma.replace("k", "")) * 1000)
        reputation.append(cleanString(karma.strip()))

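        # As with the question, no feedback, signature or interest information is available,
        # so just assign "-1" to those variables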
        feedback.append("-1")
        sign.append("-1")
        interest.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, post, user, addDate, feedback, status, reputation, sign, interest)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row

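
# A minimal usage sketch (illustrative only; the file name and the surrounding crawler code are
# assumptions, not part of this module):
# with open("hiddenanswers_topic.html", "r", encoding="utf-8") as f:
#     soup = BeautifulSoup(f.read(), "html.parser")
# row = HiddenAnswers_description_parser(soup)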


# This is the method to parse the Listing Pages (one page with many posts)
def onniForums_listing_parser(soup):
    board = "-1"  # board name (the previous level of the topic in the Forum categorization tree.
                  # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)

    nm = 0        # this variable should receive the number of topics
    topic = []    # all topics
    user = []     # all users of each topic
    post = []     # number of posts of each topic
    view = []     # number of views of each topic
    addDate = []  # when the topic was created (difficult to find)
    href = []     # this variable should receive all cleaned urls (we will use this to do the merge between
                  # Listing and Description pages)

    # Finding the board (should be just one)
    board = soup.find('span', {"class": "active"}).text
    board = cleanString(board.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find_all('tr', {"class": "inline_row"})
    index = 0
    for itopic in itopics:

        # For each topic found, the structure holding the rest of the information can be of two types,
        # so we test both of them in order not to miss any topic

        # Adding the topic to the topic list
        try:
            topics = itopic.find('span', {"class": "subject_old"}).find('a').text
        except:
            topics = itopic.find('span', {"class": "subject_new"}).find('a').text
        topics = re.sub(r"\[\w*\]", '', topics)
        topic.append(cleanString(topics))

        # Counting how many topics we have found so far
        nm = len(topic)

        # Adding the url to the list of urls
        try:
            link = itopic.find('span', {"class": "subject_old"}).find('a').get('href')
        except:
            link = itopic.find('span', {"class": "subject_new"}).find('a').get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the author of the topic
        ps = itopic.find('div', {"class": "author smalltext"}).find('a').text
        author = ps.strip()
        user.append(cleanString(author))

        # Finding the number of replies
        columns = itopic.findChildren('td', recursive=False)
        posts = columns[3].text
        post.append(cleanString(posts))

        # Finding the number of Views
        tview = columns[4].text
        view.append(cleanString(tview))

        # If there is no information about when the topic was added, just assign "-1" to the variable
        # dt = itopic.find('div', {"class": "responsive-hide"}).text.split('»')[1]
        # dt = dt.strip()
        # date_time_obj = datetime.strptime(dt, '%a %b %d, %Y %I:%M %p')
        # addDate.append(date_time_obj)
        addDate.append("-1")

        index += 1

    return organizeTopics("CryptBB", nm, topic, board, view, post, user, addDate, href)

# if len(tag) > 0:
#
#     # Finding the topic
#
#     tds = tds[0].find(tag[0])
#     topics = tds.text
#     topics = topics.replace(u"\xbb", "")
#     topics = topics.strip()
#     topic.append(cleanString(topics))
#
#     # Counting how many topics we have found so far
#
#     nm = len(topic)
#
#     # Adding the url to the list of urls
#
#     link = tds.findAll('a', href=True)
#     link = link[0].get('href')
#     link = cleanLink(link)
#     href.append(link)
#
#     # Finding the author of the topic
#
#     ps = itopic.find('td', {"class": tag[1]}).find('p').find('a')
#     if ps == None:
#         ps = itopic.find('td', {"class": tag[1]}).find('p')
#         ps = ps.text.replace("Started by ", "")
#     else:
#         ps = ps.text
#     author = ps.strip()
#     user.append(cleanString(author))
#
#     # Finding the number of replies
#
#     statistics = itopic.find('td', {"class": tag[2]})
#     statistics = statistics.text
#     statistics = statistics.split("Replies")
#     posts = statistics[0].strip()
#     post.append(cleanString(posts))
#
#     # Finding the number of Views
#
#     views = statistics[1]
#     views = views.replace("Views", "")
#     views = views.strip()
#     view.append(cleanString(views))
#
#     # As no information about when the topic was added, just assign "-1" to the variable
#
#     addDate.append("-1")

# return organizeTopics("TheMajesticGarden", nm, topic, board, view, post, user, addDate, href)
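

# This is the method to parse the Listing Pages (one page with many questions)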
def HiddenAnswers_listing_parser(soup: BeautifulSoup):
    board = "-1"              # board name (the previous level of the topic in the Forum categorization tree.
                              # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    forum: str = "HiddenAnswers"
    nm: int = 0               # this variable should receive the number of topics
    topic: List[str] = []     # all topics
    user: List[str] = []      # all users of each topic
    post: List[int] = []      # number of posts of each topic
    view: List[int] = []      # number of views of each topic
    addDate: List[str] = []   # when the topic was created (difficult to find)
    href: List[str] = []      # this variable should receive all cleaned urls (we will use this to do the merge between
                              # Listing and Description pages)

    # Finding the board
    literature = soup.find("div", {"class": "qa-main-heading"}).find("h1")
    board = literature.text

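    # Finding the repeated tag that corresponds to the listing of questions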
    queries_by_user: ResultSet[Tag] = soup.find("div", {"class": "qa-q-list"}).find_all("div", {"class": "qa-q-list-item"})

    for queries in queries_by_user:
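        # Adding the topic (question title) to the topic list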
        topic_of_query = queries.find("div", {"class": "qa-q-item-title"}).find("a").text
        topic.append(cleanString(topic_of_query.strip()))

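        # Finding the author of the question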
        author = queries.find("span", {"class": "qa-q-item-who-data"}).find("a").text
        user.append(cleanString(author.strip()))

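        # Finding the number of answers, used here as the number of posts for the topic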
        num_answers = queries.find("span", {"class": "qa-a-count-data"}).text
        post.append(cleanString(num_answers.strip()))

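        # The number of views is not collected here, so just assign "-1" to the variable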
        view.append("-1")

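        # Finding the date the question was posted; values containing "day" (e.g. "1 day ago") are approximated as yesterday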
        date_posted = queries.find("span", {"class": "qa-q-item-when-data"}).text

        if date_posted.find("day") > 0:
            datetime_obj = datetime.now() - timedelta(days=1)
        else:
            datetime_obj = datetime.strptime(f"{date_posted} {date.today().year}", "%b %d %Y")
        addDate.append(datetime_obj)

        # this link will be cleaned
        listing_href = queries.find("div", {"class": "qa-q-item-title"}).find("a").get("href")
        href.append(listing_href)

    # need to change this method
    nm = len(topic)
    return organizeTopics(forum, nm, board, user, topic, view, post, href, addDate)



# need to change this method
def hiddenanswers_links_parser(soup):