|
|
- __author__ = 'Helium'
-
# Here, we are importing the auxiliary functions to clean or convert data
from typing import List
from Forums.Utilities.utilities import *
from datetime import date
from datetime import datetime
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup, ResultSet, Tag
-
- # This is the method to parse the Description Pages (one page to each topic in the Listing Pages)
-
def HiddenAnswers_description_parser(soup: BeautifulSoup):
    """Parse a Description page (one topic: the question plus its answers).

    :param soup: parsed HTML of a single HiddenAnswers topic page
    :return: tuple ``(topic, user, status, reputation, interest, sign, post,
             feedback, addDate)`` — ``topic`` is a string, every other field is
             a list with one entry per post (the question first, then each
             answer in page order)
    """
    topic: str = "-1"              # topic name
    user: List[str] = []           # all users of each post
    addDate: List[datetime] = []   # all dates of each post
    feedback: List[str] = []       # all feedbacks of each vendor (found in just one forum, numeric format)
    status: List[str] = []         # all users' authority in each post (e.g. adm, member, dangerous)
    reputation: List[str] = []     # all users' karma in each post (usually a number)
    sign: List[str] = []           # all users' signatures in each post (standard message after the content)
    post: List[str] = []           # all messages of each post
    interest: List[str] = []       # all users' interests in each post

    # Finding the topic (should be just one coming from the Listing Page)
    li = soup.find("h1").find("span", {"itemprop": "name"})
    topic = li.text

    # The question itself is treated as the first "post" of the thread
    question: Tag = soup.find("div", {"class": "qa-part-q-view"})

    question_user = question.find("span", {"class": "qa-q-view-who-data"}).text
    user.append(cleanString(question_user.strip()))

    question_time = question.find("span", {"class": "qa-q-view-when-data"}).find("time").get("datetime")
    # Timestamps look like "2023-01-01T12:00:00+00:00"; drop the UTC offset part
    datetime_string = question_time.split("+")[0]
    addDate.append(datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S"))

    question_user_status = question.find("span", {"class": "qa-q-view-who-title"}).text
    status.append(cleanString(question_user_status.strip()))

    question_user_karma = question.find("span", {"class": "qa-q-view-who-points-data"}).text
    # Karma may be abbreviated (e.g. "1.2k"); convert it to a pure numerical string
    if question_user_karma.find("k") > -1:
        question_user_karma = str(float(question_user_karma.replace("k", "")) * 1000)
    reputation.append(cleanString(question_user_karma.strip()))

    question_content = question.find("div", {"class": "qa-q-view-content qa-post-content"}).text
    post.append(cleanString(question_content.strip()))

    # Fields this forum does not expose for the question
    feedback.append("-1")
    sign.append("-1")
    interest.append("-1")

    # A topic may have zero answers: guard against the missing answer list
    # (the original crashed with AttributeError on unanswered topics)
    answer_div = soup.find("div", {"class": "qa-a-list"})
    answer_list: ResultSet[Tag] = answer_div.find_all("div", {"class": "qa-a-list-item"}) if answer_div else []

    for replies in answer_list:
        # BUGFIX: attrs must be a dict {"class": ...}, not a set {"class", ...}
        user_name = replies.find("span", {"class": "qa-a-item-who-data"}).text
        user.append(cleanString(user_name.strip()))

        date_added = replies.find("span", {"class": "qa-a-item-when"}).find("time", {"itemprop": "dateCreated"}).get('datetime')
        date_string = date_added.split("+")[0]
        addDate.append(datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S"))

        post_data = replies.find("div", {"class": "qa-a-item-content qa-post-content"}).find("div", {"itemprop": "text"}).text
        post.append(cleanString(post_data.strip()))

        # BUGFIX: same set-vs-dict attrs fix as above
        user_reputations = replies.find("span", {"class": "qa-a-item-who-title"}).text
        status.append(cleanString(user_reputations.strip()))

        karma = replies.find("span", {"class": "qa-a-item-who-points-data"}).text
        # Convert abbreviated karma ("k" suffix) to a pure numerical string
        if karma.find("k") > -1:
            karma = str(float(karma.replace("k", "")) * 1000)
        reputation.append(cleanString(karma.strip()))

        # Fields this forum does not expose for answers
        feedback.append("-1")
        sign.append("-1")
        interest.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row
-
-
def HiddenAnswers_listing_parser(soup: BeautifulSoup):
    """Parse a Listing page (one board with many topics).

    :param soup: parsed HTML of a single HiddenAnswers listing page
    :return: the row built by ``organizeTopics(...)`` describing every topic
             found on the page
    """
    board = "-1"                  # board name (the previous level of the topic in the Forum
                                  # categorization tree, e.g. Security/Malware -> Malware)
    forum: str = "HiddenAnswers"  # forum name
    nm: int = 0                   # number of topics found on the page
    topic: List[str] = []         # all topics
    user: List[str] = []          # author of each topic
    post: List[str] = []          # number of answers of each topic (cleaned numeric strings)
    view: List[str] = []          # number of views of each topic ("-1": not shown by this forum)
    addDate: List[datetime] = []  # when each topic was created (approximate, see below)
    href: List[str] = []          # all topic urls (used later to merge Listing and Description pages)

    # Finding the board
    literature = soup.find("div", {"class": "qa-main-heading"}).find("h1")
    board = literature.text

    queries_by_user: ResultSet[Tag] = soup.find("div", {"class": "qa-q-list"}).find_all("div", {"class": "qa-q-list-item"})

    for queries in queries_by_user:
        topic_of_query = queries.find("div", {"class": "qa-q-item-title"}).find("a").text
        topic.append(cleanString(topic_of_query.strip()))

        author = queries.find("span", {"class": "qa-q-item-who-data"}).find("a").text
        user.append(cleanString(author.strip()))

        num_answers = queries.find("span", {"class": "qa-a-count-data"}).text
        post.append(cleanString(num_answers.strip()))

        # View counts are not exposed on listing pages
        view.append("-1")

        date_posted = queries.find("span", {"class": "qa-q-item-when-data"}).text

        if "day" in date_posted:
            # Relative date such as "1 day ago": approximate with yesterday
            datetime_obj = datetime.now() - timedelta(days=1)
        else:
            # Absolute date such as "Jan 5"; the year is not printed, assume the current year
            datetime_obj = datetime.strptime(f"{date_posted} {date.today().year}", "%b %d %Y")
        addDate.append(datetime_obj)

        # This link will be cleaned later, during the merge
        listing_href = queries.find("div", {"class": "qa-q-item-title"}).find("a").get("href")
        href.append(listing_href)

    nm = len(topic)
    return organizeTopics(forum, nm, board, user, topic, view, post, href, addDate)
-
- #need to change this method
def hiddenanswers_links_parser(soup):
    """Return the href of every topic on a listing page.

    These are the links the Crawler should visit next, one per topic title.
    """
    title_blocks = soup.find_all('div', {"class": "qa-q-item-title"})
    return [block.find('a').get('href') for block in title_blocks]
|