__author__ = 'DarkWeb'

# Importing the auxiliary functions used to clean and convert the scraped data
from Forums.Utilities.utilities import *
from datetime import datetime

# Importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup, ResultSet, Tag


# This is the method to parse the Description Pages (one page per topic in the Listing Pages)
def libre_description_parser(soup: Tag):
    # Fields to be parsed

    topic = "-1"     # 0 *topic name
    user = []        # 1 *all users of each post
    status = []      # 2 all users' authority in each post, such as (adm, member, dangerous)
    reputation = []  # 3 all users' karma in each post (usually found as a number)
    interest = []    # 4 all users' interests in each post
    sign = []        # 5 all users' signatures in each post (usually a standard message after the post content)
    post = []        # 6 all messages of each post
    feedback = []    # 7 all feedback of each vendor (found in only one forum, as a number)
    addDate = []     # 8 all dates of each post

    # Finding the topic (should be just one, coming from the Listing Page)
    topic_found = soup.find("a", {"class": "link text-xl text-zinc-300"}).text
    topic = cleanString(topic_found.strip())

    # The original post lives in a different container than the replies
    original_post: Tag = soup.find("div", {"class": "flex items-start"})

    original_user = original_post.find("div", {"class": "info-p"}).find("a", {"class": "link"}).text
    user.append(cleanString(original_user.replace("/u/", "").strip()))

    original_user_statistics: ResultSet[Tag] = original_post.find("div", {"class": "info-p"}).find_all("span")

    # The first span holds the post date; the [2:] slice drops the two-character
    # separator that precedes the timestamp
    original_time = original_user_statistics[0].text[2:]
    datetime_append = datetime.strptime(original_time, "%Y-%m-%d %H:%M:%S GMT")
    addDate.append(datetime_append)

    # The second span holds the karma; slice off the same two-character separator
    original_karma = original_user_statistics[1].text[2:]
    reputation.append(cleanString(original_karma.strip()))

    original_content = soup.find("div", {"class": "content-p"}).text
    post.append(cleanString(original_content.strip()))

    # Fields not available on Libre description pages are filled with "-1"
    status.append("-1")
    interest.append("-1")
    sign.append("-1")
    feedback.append("-1")
    # Finding the repeated tag that corresponds to the listing of reply posts
    posts: ResultSet[Tag] = soup.find_all("div", {"class": "flex items-stretch"})

    # For each message (post), get all the fields we are interested in:
    for ipost in posts:
        # Finding the author (user) of the post
        user_name = ipost.find("p", {"class": "text-zinc-400 text-justify"}).find("a", {"class": "link"}).text
        user.append(cleanString(user_name.replace("/u/", "").strip()))  # Remember to clean the problematic characters

        # Libre does not expose the author's status (authority)
        status.append("-1")

        # Libre does not expose the author's interests (blurb)
        interest.append("-1")

        # Finding the reputation of the user: the seventh whitespace-separated token
        # of the author info line is assumed to hold the karma value
        karma = ipost.find("p", {"class": "text-zinc-400 text-justify"}).text
        karma_cleaned = karma.split(" ")[6]
        reputation.append(cleanString(karma_cleaned.strip()))

        # Finding the post date: strip the username from the info line, then slice
        # off the three-character leading separator and the twelve-character karma suffix
        date_posted = ipost.find("p", {"class": "text-zinc-400 text-justify"}).text
        date_time_cleaned = date_posted.replace(user_name, "")[3:-12]
        datetime_append = datetime.strptime(date_time_cleaned, "%Y-%m-%d %H:%M:%S GMT")
        addDate.append(datetime_append)

        # Finding the post content
        user_post = ipost.find("div", {"class": "content-c"}).text
        post.append(cleanString(user_post))

        # No signature information was found, so assign "-1"
        sign.append("-1")

        # No feedback information was found, so assign "-1"
        feedback.append("-1")
    # Populate the final variable (a tuple with all the fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row

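# A minimal usage sketch (not part of the original parser), assuming a locally
# saved topic page; "libre_topic.html" is a hypothetical file name. The returned
# tuple unpacks into the nine fields listed at the top of the function.
#
#   with open("libre_topic.html", "r", encoding="utf-8") as f:
#       topic_soup = BeautifulSoup(f.read(), "html.parser")
#   topic, user, status, reputation, interest, sign, post, feedback, addDate = \
#       libre_description_parser(topic_soup)
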

# This is the method to parse the Listing Pages (one page listing many topics)

def libre_listing_parser(soup):
    nm = 0           # *this variable should receive the number of topics
    forum = "Libre"  # 0 *forum name
    board = "-1"     # 1 *board name (the level above the topic in the forum categorization tree.
                     # For instance: Security/Malware/Tools to hack Facebook. The board here would be Malware)
    author = []      # 2 *all authors of each topic
    topic = []       # 3 *all topics
    views = []       # 4 number of views of each topic
    posts = []       # 5 number of posts of each topic
    href = []        # 6 this variable should receive all cleaned urls (used later to merge
                     # the Listing and Description pages)
    addDate = []     # 7 when the topic was created (difficult to find)

    # Finding the board (should be just one)
    board = soup.find('div', {"class": "title"}).find("h1").text
    board = cleanString(board.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    # (the attrs argument must be a dict, not a set, for BeautifulSoup to filter on it)
    itopics = soup.find("div", {"class": "space-y-2 mt-4"}).find_all('div', {"class": "flex box"})

    for itopic in itopics:
        nm += 1
        # For each topic found, the structure holding the rest of the information can be of
        # two types, so test all of them in order not to miss any topic

        # Adding the topic to the topic list
        topic_string = itopic.find("a", {"class": "link text-xl text-zinc-300"}).text
        cleaned_topic_string = cleanString(topic_string.strip())
        topic.append(cleaned_topic_string)

        # Adding the url to the list of urls
        link_to_clean = itopic.find("a", {"class": "link text-xl text-zinc-300"}).get("href")
        href.append(link_to_clean)

        # Finding the author of the topic
        username_not_cleaned = itopic.find('div', {"class": "flex-grow p-2 text-justify"}).find('a').text
        username_cleaned = username_not_cleaned.split("/")[-1]
        author.append(cleanString(username_cleaned))

        # Finding the number of views
        num_views = itopic.find_all("div", {"class": "flex items-center"})[0].find("p").text
        views.append(cleanString(num_views))

        # Finding the number of replies
        num_replies = itopic.find_all("div", {"class": "flex items-center"})[1].find("p").text
        posts.append(cleanString(num_replies))

        # Finding when the topic was added: strip the username from the line, then
        # slice off the three-character separator before parsing the timestamp
        date_time_concatenated = itopic.find("p", {"class": "text-sm text-zinc-400 italic"}).text
        date_time_cleaned = date_time_concatenated.replace(username_not_cleaned, "")
        date_time_array = date_time_cleaned[3:]
        datetime_append = datetime.strptime(date_time_array, "%Y-%m-%d %H:%M:%S GMT")
        addDate.append(datetime_append)

    return organizeTopics(
        forum=forum,
        nm=nm,
        board=board,
        author=author,
        topic=topic,
        views=views,
        posts=posts,
        href=href,
        addDate=addDate
    )


def libre_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []
    listing = soup.find_all('div', {"class": "flex-grow p-2 text-justify"})

    for entry in listing:
        # Each listing entry holds one topic; grab its anchor's href
        link = entry.find('div', {'class': 'flex space-x-2 items-center'}).find('a').get('href')
        href.append(link)

    return href
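

# A minimal sketch (not part of the original module) of how a crawler might wire
# these parsers together; "libre_listing.html" is a hypothetical locally saved page.
if __name__ == "__main__":
    with open("libre_listing.html", "r", encoding="utf-8") as f:
        listing_soup = BeautifulSoup(f.read(), "html.parser")

    # Links the crawler would visit next, plus the organized listing row
    topic_links = libre_links_parser(listing_soup)
    listing_row = libre_listing_parser(listing_soup)
    print(f"{len(topic_links)} topics found on this listing page")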