
added Incogsnoo crawler and parser

Branch: main
Kimtaiyo Mech committed 1 year ago (parent commit d6be3bf58d)

2 changed files, with 560 additions and 0 deletions:
  1. Forums/Incogsnoo/crawler_selenium.py (+289, -0)
  2. Forums/Incogsnoo/parser.py (+271, -0)

Forums/Incogsnoo/crawler_selenium.py (+289, -0)

@@ -0,0 +1,289 @@
__author__ = 'DarkWeb'

'''
Incogsnoo Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.Incogsnoo.parser import incogsnoo_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/'
# Opens Tor Browser, crawls the website, then parses the pages, then closes Tor.
# Acts as the main method for the crawler; the crawler() function at the end of
# this file calls it.
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(forumName, baseURL, True)
# Returns the name of the website
#return: name of site in string type
def getForumName():
    name = 'Incogsnoo'
    return name


# Returns the base link of the website
#return: url of base site in string type
def getFixedURL():
    url = 'http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/'
    return url
# Closes Tor Browser
#@param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)  # might need to turn off
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
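
# Note: FirefoxBinary and FirefoxProfile are deprecated in Selenium 4. A minimal
# equivalent using Options (a sketch under that assumption, not part of this commit)
# would be:
#
#   from selenium.webdriver.firefox.options import Options
#   opts = Options()
#   opts.binary_location = config.get('TOR', 'firefox_binary_path')
#   opts.set_preference('network.proxy.type', 1)
#   opts.set_preference('network.proxy.socks', '127.0.0.1')
#   opts.set_preference('network.proxy.socks_port', 9150)
#   opts.set_preference('network.proxy.socks_remote_dns', True)
#   driver = webdriver.Firefox(service=Service(config.get('TOR', 'geckodriver_path')), options=opts)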
# The driver 'gets' the url and attempts to access the site; if it can't, returns the string 'down'
#return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Saves the crawled html page, creating the directory path for html pages if it doesn't exist
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath
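
# For illustration (hypothetical values): with a CURRENT_DATE of "01152024" and a
# description URL, the saved file would land at something like
#   <shared_folder>/Forums/Incogsnoo/HTML_Pages/01152024\\Description\\<cleaned-url-name>.html
# (the raw-string '\\' separators above follow Windows path conventions).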
# Creates the file name from the passed URL; assigns a distinct counter-based
# name if nothing unique is left after cleaning
#@param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
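
# For illustration: the name is just the URL stripped to its alphanumeric characters, e.g.
#   getNameFromURL('http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/r/Malware')
#   -> 'httptedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytadonionrMalware'
# A URL with no alphanumeric characters at all falls back to the global counter.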
# Returns the list of urls of interest; the crawler runs through this list.
# Each link is a subreddit listing whose threads will be crawled; currently only
# Malware is listed, and more can be appended below.
def getInterestedLinks():
    links = []

    # Malware
    links.append('http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/r/Malware')

    return links
# Gets the links of interest and iterates through the list; each link is visited and crawled.
# Both topic (listing) and description pages are crawled here, and both types of pages are saved.
#@param: selenium driver
def crawlForum(driver):
    print("Crawling the Incogsnoo forum")

    # edge cases:
    # 1. if a comment thread goes too deep, we need to click "continue this thread" to show more replies
    # 2. the site will sometimes rate-limit us and not show the contents;
    #    right now there is no detection mechanism and it won't throw any errors

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()

                        if isListingLink(driver.current_url):
                            break

                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # comment out
                        if counter == 2:
                            break

                        try:
                            # incogsnoo doesn't have a next button to load more pages of the description;
                            # advance 'page' here (not 'link', which must keep pointing at the listing)
                            link_tag = driver.find_element(by=By.XPATH, value="/html/body/div[2]/div[last()]/a[contains(text(),'next')]")
                            page = link_tag.get_attribute("href")
                            if page == "":
                                raise NoSuchElementException
                            counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                # making sure we go back to the listing page (browser back button simulation)
                try:
                    driver.get(link)
                except:
                    driver.refresh()

                # comment out
                # break

                # comment out
                if count == 1:
                    break

                try:
                    link_tag = driver.find_element(by=By.XPATH, value="/html/body/div[2]/div[last()]/a[contains(text(),'next')]")
                    link = link_tag.get_attribute("href")
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Incogsnoo forum done.")
# Returns 'True' if the link is a description link
#@param: url of any url crawled
#return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'comments' in url:
        return True
    return False


# Returns True if the link is a listingPage link
#@param: url of any url crawled
#return: true if is a Listing page, false if not
def isListingLink(url):
    if isDescriptionLink(url):
        return False
    return True
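
# For illustration (hypothetical URLs on this teddit instance):
#   isDescriptionLink("http://...onion/r/Malware/comments/abc123/some_title")  -> True
#   isListingLink("http://...onion/r/Malware")                                 -> True
# Any URL without "comments" in it is treated as a listing page.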
# Calls the links parser on the html of a listing page from the interested-links list
#@param: html of a listing page
#return: list of description (topic) links that should be crawled through
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return incogsnoo_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Incogsnoo .... DONE!")

Forums/Incogsnoo/parser.py (+271, -0)

@@ -0,0 +1,271 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
# explicit import for the strptime calls below (datetime may also arrive via the wildcard import above)
from datetime import datetime
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
# It takes the html of a description page as a soup object and parses it for the info it needs,
# storing that info in different lists that are organized and returned together.
#@param: soup object looking at the html page of a description page
#return: 'row', a tuple of lists that each hold info on the description page
def incogsnoo_description_parser(soup):

    # Fields to be parsed

    topic = "-1"        # 0 topic name                   ***$
    user = []           # 1 all users of each post       ***$ author
    status = []         # 2 all user's authority in each post such as (adm, member, dangerous)
    reputation = []     # 3 all users's karma in each post (usually found as a number) ??? ups
    interest = []       # 4 all user's interest in each post
    sign = []           # 5 all user's signature in each post (usually a standard message after the content of the post)
    post = []           # 6 all messages of each post
    feedback = []       # 7 all feedbacks of each user (this was found in just one Forum and with a number format)
    addDate = []        # 8 all dates of each post       ***$ created
    image_user = []     # 9 all user avatars of each post
    image_post = []     # 10 all first images of each post

    # Finding the topic (should be just one coming from the Listing Page)
    topic = soup.find("div", {"class": "title"}).find("h2").text
    topic = topic.replace('"', '')
    topic = cleanString(topic.strip())

    # The first post's html is separated from all subsequent comments/replies,
    # so parse the first post by itself first.

    # Finding the body of the first post
    post_text = soup.find("div", {"class": "md"})
    if post_text:
        post_text = post_text.text.strip()
        post.append(cleanString(post_text))
    else:   # some posts are just links to other sites/articles/videos and have no text of their own
        post_link = soup.find("div", {"class": "title"}).find("a").get("href")
        post_link = cleanLink(post_link)
        post.append(post_link)

    # User
    p_tag = soup.find("p", {"class": "submitted"})
    author = p_tag.find("a")
    if author:
        author = author.text.strip()
    elif "[deleted]" in p_tag.text:
        author = "deleted"
    else:
        author = "-1"
    user.append(cleanString(author))

    # Finding the status of the author
    status.append("-1")

    # Finding the reputation of the user
    reputation.append("-1")

    # Finding the interest of the author
    interest.append("-1")

    # Finding signature
    sign.append("-1")

    # Finding feedback
    upvote = soup.find("div", {"class": "score"}).find("span")
    if upvote:
        upvote = upvote.text.strip()
    else:
        upvote = "-1"
    feedback.append(cleanString(upvote))

    # Finding the date of the post - e.g. "Mon, 18 Dec 2023 05:49:20 GMT"
    dt = soup.find("p", {"class": "submitted"}).find("span")["title"]
    # Convert to a datetime object - e.g. 2023-12-18 05:49:20
    date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')

    sdate = date_time_obj.strftime('%m %d %Y')
    stime = date_time_obj.strftime('%I:%M %p')

    date = convertDate(sdate, "english", datetime.now()) + " " + stime
    # e.g. "12/18/2023 05:49 AM"
    addDate.append(date)

    image_user.append("-1")
    image_post.append("-1")

    posts = soup.find("div", {"class": "comments"}).findAll("details")

    # For each message (post), get all the fields we are interested in:
    for ipost in posts:

        # Finding the user
        p_tag = ipost.find("p", {"class": "author"})
        author = p_tag.find("a")
        if author:
            author = author.text.strip()
        elif "[deleted]" in p_tag.text:
            author = "deleted"
        else:
            author = "-1"
        user.append(cleanString(author))

        # Finding the status of the author
        status.append("-1")

        # Finding the reputation of the user
        reputation.append("-1")

        # Finding the interest of the author
        interest.append("-1")

        # Finding signature
        sign.append("-1")

        # Finding the post
        comment = ipost.find("div", {"class": "md"})
        if comment:
            comment = comment.text.strip()
        else:
            comment = "-1"
        post.append(cleanString(comment))

        # Finding feedback
        upvote = ipost.find("p", {"class": "ups"})
        if upvote:
            upvote = upvote.text.strip().split()[0]
        else:
            upvote = "-1"
        feedback.append(cleanString(upvote))

        # Finding the date of the post - e.g. "Mon, 18 Dec 2023 05:49:20 GMT"
        dt = ipost.find("p", {"class": "created"})["title"]
        # Convert to a datetime object - e.g. 2023-12-18 05:49:20
        date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')

        sdate = date_time_obj.strftime('%m %d %Y')
        stime = date_time_obj.strftime('%I:%M %p')

        date = convertDate(sdate, "english", datetime.now()) + " " + stime
        # e.g. "12/18/2023 05:49 AM"
        addDate.append(date)

        image_user.append("-1")
        image_post.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results
    return row
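
# Worked example of the timestamp pipeline above (values assumed for illustration):
#   dt = "Mon, 18 Dec 2023 05:49:20 GMT"
#   date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')  # 2023-12-18 05:49:20
#   date_time_obj.strftime('%m %d %Y')   # '12 18 2023'
#   date_time_obj.strftime('%I:%M %p')   # '05:49 AM'
# convertDate() then normalizes the date part before it is joined with the time string.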
# This is the method to parse the Listing Pages (one page with many topics)
# It takes the html of a listing page as a soup object and parses it for the info it needs,
# storing that info in different lists that are organized and returned together.
#@param: soup object looking at the html page of a listing page
#return: 'row' organized by organizeTopics(), holding the info scraped from the listing page
def incogsnoo_listing_parser(soup):

    nm = 0                  # *this variable should receive the number of topics
    forum = "Incogsnoo"     # 0 *forum name
    board = "-1"            # 1 *board name (the previous level of the topic in the Forum categorization tree.
                            # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []             # 2 *all authors of each topic
    topic = []              # 3 *all topics
    views = []              # 4 number of views of each topic
    posts = []              # 5 number of posts of each topic
    href = []               # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                            # Listing and Description pages)
    addDate = []            # 7 when the topic was created (difficult to find)
    image_author = []       # 8 all author avatars used in each topic

    # Finding the board (should be just one)
    board = soup.find("a", {"class": "subreddit"}).find("h2")
    board = cleanString(board.text.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find("div", {"id": "links", "class": "sr"}).findAll("div", {"class": "link"})
    itopics.pop()   # the last "link" div is presumably not a topic entry (e.g. pagination), so drop it

    # Counting how many topics we have found so far
    nm = len(itopics)

    index = 0
    for itopic in itopics:

        # Finding the author of the topic
        p_tag = itopic.find("p", {"class": "submitted"})
        user = p_tag.find("a")
        if user:
            user = user.text.strip()
        elif "[deleted]" in p_tag.text:
            user = "deleted"
        else:
            user = "-1"
        author.append(cleanString(user))

        # Adding the topic to the topic list
        topic_title = itopic.find("div", {"class": "title"}).find("h2").text
        topic.append(cleanString(topic_title))

        # Finding the number of Views
        views.append("-1")

        # Finding the number of posts
        comments = itopic.find("a", {"class": "comments"}).text
        number_comments = comments.split()[0]
        posts.append(cleanString(number_comments))

        # Adding the url to the list of urls
        link = itopic.find("a", {"class": "comments"}).get("href")
        link = cleanLink(link)
        href.append(link)

        # Finding dates
        p_tag = itopic.find("p", {"class": "submitted"})
        dt = p_tag.find("span")["title"]
        date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')
        sdate = date_time_obj.strftime('%m %d %Y')
        stime = date_time_obj.strftime('%I:%M %p')
        date = convertDate(sdate, "english", datetime.now()) + " " + stime
        # e.g. "12/18/2023 05:49 AM"
        addDate.append(date)

        image_author.append("-1")

        index += 1

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate, image_author)
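
# Sanity check (illustrative only, not part of this commit): organizeTopics()
# merges parallel lists, so every per-topic list built above should end up with
# exactly nm entries:
#
#   assert all(len(lst) == nm for lst in
#              (author, topic, views, posts, href, addDate, image_author))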
# Called by the crawler to get the description links on a listing page
#@param: beautifulsoup object of the html of a listing page
#return: list of description links from a listing page
def incogsnoo_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listing_parent = soup.find("div", {"id": "links", "class": "sr"})
    listing = listing_parent.findAll("div", {"class": "entry"})

    count = 0
    for entry in listing:
        parent_div = entry.find("div", {"class": "meta"}).find("div", {"class": "links"})
        a_tag = parent_div.find("a", {"class": "comments"})
        if a_tag:
            href.append(a_tag.get("href"))

        # if count == 10:
        #     break

        count += 1

    return href
