__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import date
from datetime import datetime
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup, ResultSet, Tag


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def procrax_description_parser(soup: Tag):

    # Fields to be parsed
    topic = "-1"            # 0 topic name
    user = []               # 1 all users of each post
    addDate = []            # 2 all dates of each post
    feedback = []           # 3 all feedback of each vendor (this was found in just one forum and in a number format)
    status = []             # 4 all users' authority in each post, such as (admin, member, dangerous)
    reputation = []         # 5 all users' karma in each post (usually found as a number)
    sign = []               # 6 all users' signatures in each post (usually a standard message after the content of the post)
    post = []               # 7 all messages of each post
    interest = []           # 8 all users' interests in each post
    image_user = []         # 9 all user avatars of each post
    image_post = []         # 10 all first images of each post

    # Finding the topic (should be just one coming from the Listing Page)
    li = soup.find("h1", {"class": "p-title-value"})
    topic = cleanString(li.text.strip())

    # Every post in the thread is an <article> element carrying a data-author attribute
    thread: ResultSet[Tag] = soup.find("div", {"class": "block-body js-replyNewMessageContainer"}).find_all("article", {"data-author": True})

    for ipost in thread:
        username = ipost.find("h4", {"class": "message-name"}).text
        user.append(cleanString(username.strip()))

        # The post time is exposed as an ISO-8601 datetime attribute on the <time> element
        date_posted = ipost.find("ul", {"class": "message-attribution-main listInline"}).find("time").get("datetime")
        datetime_obj = datetime.strptime(date_posted, "%Y-%m-%dT%H:%M:%S%z")
        addDate.append(datetime_obj)

        # Feedback is not scraped for this forum; use the default "-1"
        feedback.append("-1")

        user_status = ipost.find("h5", {"class": "userTitle message-userTitle"}).text
        status.append(cleanString(user_status.strip()))

        user_lvl = ipost.find("div", {"class": "afAwardLevel"}).text
        reputation.append(cleanString(user_lvl.strip()))

        # Signatures are not scraped for this forum; use the default "-1"
        sign.append("-1")

        user_post = ipost.find("article", {"class": "message-body js-selectToQuote"}).text
        post.append(cleanString(user_post.strip()))

        # Interests are not scraped for this forum; use the default "-1"
        interest.append("-1")

        # Keep only the base64 payload of the first image in the post body, if present
        bbWrapper = ipost.find('div', {"class": "bbWrapper"})
        if bbWrapper is not None:
            img = bbWrapper.find('img')
            if img is not None:
                img = img.get('src').split('base64,')[-1]
            else:
                img = "-1"
        else:
            img = "-1"
        image_post.append(img)

        # Keep only the base64 payload of the user's avatar image, if present
        avatar = ipost.find("a", {"class": "avatar avatar--m"})
        if avatar is not None:
            img = avatar.find('img')
            if img is not None:
                img = img.get('src').split('base64,')[-1]
            else:
                img = "-1"
        else:
            img = "-1"
        image_user.append(img)

    # Populate the final variable (this should be a tuple with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results
    return row


# This is the method to parse the Listing Pages (one page with many topics)
def procrax_listing_parser(soup: Tag):

    nm = 0                  # this variable should receive the number of topics
    forum: str = "Procrax"  # 0 *forum name
    board = "-1"            # 1 board name (the previous level of the topic in the forum categorization tree.
                            # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []             # 2 all authors of each topic
    topic = []              # 3 all topics
    views = []              # 4 number of views of each topic
    posts = []              # 5 number of posts of each topic
    href = []               # 6 this variable should receive all cleaned URLs (we will use this to do the merge between
                            # Listing and Description pages)
    addDate = []            # 7 when the topic was created (difficult to find)
    image_author = []       # 8 all author avatars used in each topic

    # Finding the board (should be just one)
    li = soup.find("h1", {"class": "p-title-value"})
    board = cleanString(li.text.strip())

    threads_list: ResultSet[Tag] = soup.find("div", {"class": "structItemContainer-group js-threadList"}).find_all("div", {"data-author": True})

    nm = len(threads_list)

    for thread in threads_list:
        thread_title = thread.find("div", {"class": "structItem-title"}).text
        topic.append(cleanString(thread_title.strip()))

        # Keep only the base64 payload of the author's avatar image, if present
        author_icon = thread.find('a', {"class": "avatar avatar--s"})
        if author_icon is not None:
            author_icon = author_icon.find('img')
            if author_icon is not None:
                author_icon = author_icon.get('src')
                author_icon = author_icon.split('base64,')[-1]
            else:
                author_icon = "-1"
        else:
            author_icon = "-1"
        image_author.append(author_icon)

        thread_author = thread.get("data-author")
        author.append(cleanString(thread_author))

        thread_views = thread.find("dl", {"class": "pairs pairs--justified structItem-minor"}).find('dd').text
        # Views may be abbreviated (e.g. "1.2K"); expand the suffix into a plain number
        thread_views = thread_views.strip().lower()
        if thread_views.endswith("k"):
            thread_views = str(int(float(thread_views[:-1]) * 1000))
        views.append(cleanString(thread_views))

        # Every thread contains one topic post plus its replies
        thread_replies = thread.find("dl", {"class": "pairs pairs--justified"}).find('dd').text
        thread_total_posts = str(1 + int(thread_replies))
        posts.append(thread_total_posts)

        thread_date = thread.find("li", {"class": "structItem-startDate"}).find("time").get("datetime")
        datetime_obj = datetime.strptime(thread_date, "%Y-%m-%dT%H:%M:%S%z")
        addDate.append(datetime_obj)

        thread_link: str = thread.find("div", {"class": "structItem-title"}).find('a').get('href')
        href.append(thread_link)

    return organizeTopics(
        forum=forum,
        nm=nm,
        board=board,
        author=author,
        topic=topic,
        views=views,
        posts=posts,
        addDate=addDate,
        href=href,
        image_author=image_author
    )


def procrax_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.find_all('div', {"class": "structItem-title"})

    for a in listing:
        link = a.find('a', {'class': ''}).get('href')
        href.append(link)

    return href
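

# A minimal usage sketch (hypothetical, not part of the crawler pipeline): in the real
# project these parsers are fed pages by the Forums crawler modules, but the snippet
# below shows how they could be exercised against locally saved HTML pages. The file
# names "procrax_listing.html" and "procrax_description.html" are placeholders.
if __name__ == "__main__":
    with open("procrax_listing.html", "r", encoding="utf-8") as listing_file:
        listing_soup = BeautifulSoup(listing_file.read(), "html.parser")

    # Parse the listing page and collect the topic links that the crawler would visit next
    listing_row = procrax_listing_parser(listing_soup)
    topic_links = procrax_links_parser(listing_soup)
    print(listing_row)
    print(topic_links)

    with open("procrax_description.html", "r", encoding="utf-8") as description_file:
        description_soup = BeautifulSoup(description_file.read(), "html.parser")

    # Parse a single topic (description) page into its per-post fields
    description_row = procrax_description_parser(description_soup)
    print(description_row)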