- __author__ = 'DarkWeb'
-
- # Auxiliary functions used to clean and convert the scraped data
- from Forums.Utilities.utilities import *
- # The datetime class is needed to build the post timestamps parsed below
- from datetime import datetime
- import re
- import traceback
- # Here, we are importing BeautifulSoup to search through the HTML tree
- from bs4 import BeautifulSoup
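- # cleanString, cleanLink and organizeTopics used below are assumed to come from the utilities star-import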
-
-
- # This is the method to parse the Description Pages (one page per topic from the Listing Pages)
- def dread_description_parser(soup):
-
- # Fields to be parsed
-
- topic = "-1" # 0 *topic name
- user = [] # 1 *all users of each post
- status = [] # 2 all user's authority in each post such as (adm, member, dangerous)
- reputation = [] # 3 all user's karma in each post (usually found as a number)
- interest = [] # 4 all user's interest in each post
- sign = [] # 5 all user's signature in each post (usually a standard message after the content of the post)
- post = [] # 6 all messages of each post
- feedback = [] # 7 all feedbacks of each vendor (this was found in just one Forum and with a number format)
- addDate = [] # 8 all dates of each post
-
- # Finding the topic (should be just one coming from the Listing Page)
- container = soup.find('div', {"class": "content"})
- li = container.find("a", {"class": "title"})
- if li is None:
- return None
- topic = li.text
- topic = topic.replace(u'\xa0', ' ')
- topic = topic.replace(",","")
- topic = topic.replace("\n","")
- topic = cleanString(topic.strip())
-
- # The initial post is rendered separately from the comments, so parse it first
- # author name (the flair text is embedded in the author block, so strip it off)
- init_post = container.find('div', {"class": "item"})
- author = init_post.find('div', {"class": "author"}).select_one('a[href^="/u/"]').text
- flair = init_post.find('div', {"class": "author"}).find("span", {"class": "flair"})
- if flair is not None:
- flair = flair.text.strip()
- author = author.replace(flair, '')
- author = author.strip()
- user.append(cleanString(author))
- # status (Dread has no member groups, so the flair is used instead)
- flair = init_post.find("span", {"class": "flair"})
- if flair is not None:
- flair = flair.text.strip()
- else:
- flair = "-1"
- status.append(cleanString(flair))
- # no blurb on Dread
- interest.append("-1")
- # points for post
- karma = init_post.find("div", {"class": "voteCount"})
- if karma is not None:
- karma = karma.text
- karma = karma.replace("points", "")
- karma = karma.replace(":", "")
- karma = karma.strip()
- else:
- karma = "-1"
- reputation.append(cleanString(karma))
- # date
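- # The "title" attribute of this <span> is assumed to hold a verbose timestamp in which the first token
- # carries the day, the third the year, and the last the hour:minute time, with the month written out in full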
- spans = init_post.find('div', {"class": "author"}).find('span', recursive=False)
- dt = spans['title']
- month = find_month(dt)
- split_text = dt.split()
- day = int(re.search(r'\d+', split_text[0]).group())
- year = int(split_text[2])
- hm = re.findall(r'\d+', split_text[-1])
- hm[0] = int(hm[0])
- hm[1] = int(hm[1])
- date_time_obj = datetime(year, month, day, hour=hm[0], minute=hm[1])
- addDate.append(date_time_obj)
-
- # content
- inner = init_post.find("div", {"class": "postContent"})
- inner = inner.text.strip()
- post.append(cleanString(inner))
-
- # no signature on Dread
- sign.append("-1")
- # no feedback information available
- feedback.append("-1")
-
-
- comments = soup.find('div', {"class": "postComments"})
- if comments is None:
- # No comments: return just the initial post, using the same field order as the full return below
- row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)
- return row
- else:
- comments = comments.find_all('div', {"class": "comment"})
-
- # For each message (comment), get all the fields we are interested in:
-
- for ipost in comments:
-
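- # Each comment <div> is expected to hold the author, the vote count, a timestamp and the comment body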
- # Finding the author of the comment
- cc = ipost.find('div', {"class": "commentContent"})
- username = cc.find('a', {"class": "username"}).text
- flair = cc.find("span", {"class": "flair"})
- if flair is not None:
- # the flair text is appended to the username, so strip it off
- flair = flair.text.strip()
- username = username.replace(flair, '')
- author = username.strip()
- user.append(cleanString(author))
-
-
- # Finding the status of the author
- # Dread has no member groups or post groups, but its flair is similar enough
- if flair is None:
- flair = "-1"
- status.append(cleanString(flair))
- # Finding the interest of the author
- # Dread does not have blurb
-
- interest.append("-1")
-
- # Finding the reputation of the user
- # Dread doesn't have reputation per user, but instead each post has its own point system
- karma = cc.find('div', {"class": "votes"})
-
- if karma is not None:
- karma = karma.text
- karma = karma.replace("points","")
- karma = karma.replace(":", "")
- karma = karma.strip()
- else:
- karma = "-1"
- reputation.append(cleanString(karma))
- # print("karma " + cleanString(karma))
- # Finding the date of the comment (same "title" timestamp format as the initial post)
-
- postarea = ipost.find('div', {"class": "timestamp"}).find('span', recursive=False)
- dt = postarea['title']
- month = find_month(dt)
- split_text = dt.split()
- day = int(re.search(r'\d+', split_text[0]).group())
- year = int(split_text[2])
- hm = re.findall(r'\d+', split_text[-1])
- hm[0] = int(hm[0])
- hm[1] = int(hm[1])
- date_time_obj = datetime(year, month, day, hour=hm[0], minute=hm[1])
- addDate.append(date_time_obj)
-
- # Finding the post
-
- inner = ipost.find('div', {"class": "commentBody"})
- inner = inner.text.strip()
- post.append(cleanString(inner))
-
- # No signature for Dread
-
- sign.append("-1")
-
- # No information about user feedback is available, so assign "-1" to the variable
-
- feedback.append("-1")
-
- # Populate the final variable (a tuple with all the scraped fields, in the order documented above)
-
- row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)
-
- # Sending the results
-
- return row
-
- # This is the method to parse the Listing Pages (one page listing many topics)
-
- def dread_listing_parser(soup):
-
- nm = 0 # *this variable should receive the number of topics
- forum = "Dread" # 0 *forum name
- board = "-1" # 1 *board name (the previous level of the topic in the Forum categorization tree.
- # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
- author = [] # 2 *all authors of each topic
- topic = [] # 3 *all topics
- views = [] # 4 number of views of each topic
- posts = [] # 5 number of posts of each topic
- href = [] # 6 this variable should receive all cleaned urls (we will use this to do the merge between
- # Listing and Description pages)
- addDate = [] # 7 when the topic was created (difficult to find)
-
- # Finding the board (should be just one)
-
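- # The "banner-top" anchor is assumed to hold the sub-dread (board) name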
- board = soup.find('a', {"class": "banner-top"}).text
- board = cleanString(board.strip())
-
- # Finding the repeated tag that corresponds to the listing of topics
-
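- # Each topic is expected to be an "item" <div> sitting directly under the "postBoard" <div>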
- itopics = soup.find("div", {"class": "postBoard"}).find_all("div", {"class": "item"}, recursive=False)
-
- for itopic in itopics:
-
- # For each topic found, extract the title (minus its flair), the author, the number of replies and the creation date
-
- # Adding the topic to the topic list
- topic_title = itopic.find("a", {"class": "title"})
- title_flair = topic_title.find('span', {"class": "flair"})
- topics = topic_title.text
- if title_flair is not None:
- # the flair text is embedded in the title, so strip it off
- title_flair = title_flair.text.strip()
- topics = topics.replace(title_flair, '')
- topics = topics.replace(u'\xa0', ' ')
- topics = topics.replace(",", "")
- topics = topics.replace("\n", "")
- topic.append(cleanString(topics.strip()))
-
- # Counting how many topics we have found so far
-
- nm = len(topic)
-
- # Adding the url to the list of urls
- link = topic_title['href']
- link = cleanLink(link)
- href.append(link)
-
- # Finding the author of the topic
- ps = itopic.find('div', {"class": "author"})
- post_wrapper = ps.select_one('a[href^="/u/"]').text
- flair = ps.find("span", {"class": "flair"})
- if flair is not None:
- flair = flair.text.strip()
- post_wrapper = post_wrapper.replace(flair, '')
- user = post_wrapper.strip()
- author.append(cleanString(user))
-
- # Finding the number of replies
- meta = itopic.find("div", {"class": "postMain"})
- post = meta.find("a").text
- post = post.replace("comments", '').strip()
- posts.append(cleanString(post))
-
- # Finding the number of Views - not shown in Dread
- views.append("-1")
-
- # Finding when the topic was added (parsed from the "title" attribute of the author <span>)
- spans = itopic.find('div', {"class": "author"}).find('span', recursive=False)
- dt = spans['title']
- month = find_month(dt)
- split_text = dt.split()
- day = int(re.search(r'\d+', split_text[0]).group())
- year = int(split_text[2])
- hm = re.findall(r'\d+', split_text[-1])
- hm[0] = int(hm[0])
- hm[1] = int(hm[1])
- date_time_obj = datetime(year, month, day, hour=hm[0], minute=hm[1])
- addDate.append(date_time_obj)
-
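- # organizeTopics (presumably provided by the utilities star-import) packs these lists into the shared row format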
- return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate)
-
-
- def dread_links_parser(soup):
-
- # Returning all links that should be visited by the Crawler
-
- href = []
-
- listing = soup.find("div", {"class": "postBoard"}).find_all("div",{"class": "item"}, recursive=False)
-
- for item in listing:
- link = item.find("a", {"class": "title"})['href']
-
- href.append(link)
-
- return href
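-
- # Typical flow (a sketch of the intended use): dread_links_parser collects the topic URLs from a listing
- # page, the crawler fetches each topic page, and dread_description_parser extracts its posts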
-
- def find_month(s):
- # Map the month name contained in the timestamp string to its number
- months = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6,
- 'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}
- for name, number in months.items():
- if name in s:
- return number
- # No month name found; return -1 so the failure is visible to the caller
- return -1
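-
-
- # The same date-parsing steps are repeated three times above. Below is a minimal sketch of a shared
- # helper, assuming the "title" attribute format described earlier; it is not wired into the parsers above.
- def parse_dread_timestamp(dt):
- month = find_month(dt)
- split_text = dt.split()
- day = int(re.search(r'\d+', split_text[0]).group())
- year = int(split_text[2])
- hour, minute = (int(x) for x in re.findall(r'\d+', split_text[-1])[:2])
- return datetime(year, month, day, hour=hour, minute=minute)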