Untested NemesisForums crawler; I am unable to run anything related to forums main
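Note for reviewers: since forums main won't run yet, the crawler can be smoke-tested standalone. A minimal sketch, assuming the TOR paths are set in the config and the module lives at Forums/NemesisForums/crawler_selenium.py like the other forum crawlers (the path is an assumption, not confirmed by this diff):

    # hypothetical standalone runner; the module path is an assumption
    from Forums.NemesisForums.crawler_selenium import crawler

    if __name__ == '__main__':
        crawler()  # opens the Tor-proxied Firefox, waits for the page, and crawls the seed listings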
@@ -0,0 +1,279 @@
__author__ = 'DarkWeb'

'''
Nemesis Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.NemesisForums.parser import nemesisforums_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/communities'
# Opens Tor Browser, crawls the website
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    # new_parse(forumName, baseURL, True)
# Login using premade account credentials and do the login captcha manually
def login(driver):
    # wait for the listing page to show up (this XPath may need to change based on the seed url)
    # wait up to 120 sec until the page container is found, then continue
    WebDriverWait(driver, 120).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div[1]/div')))
# Returns the name of the website
def getForumName() -> str:
    name = 'NemesisForums'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/communities'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close tab
    time.sleep(3)
    return
# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if not isListingLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath
# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
def getInterestedLinks():
    links = []

    # Carding
    links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Carding')
    # Hacking
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Hacking')
    # Programming
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Programming')
    # Malware
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Malware')
    # DDoS
    links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/DDoS')
    # Fraud
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Fraud')

    return links
def crawlForum(driver):
    print("Crawling the Nemesis forum")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1  # per-topic page counter (local; shadows the module-level counter)
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()

                        # a redirect back to a listing page means the topic page is gone
                        if isListingLink(driver.current_url):
                            break

                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # comment out
                        if counter == 2:
                            break

                        try:
                            page = driver.find_element(by=By.XPATH, value='//a[contains(text(), ">")]').get_attribute('href')
                            if page == "":
                                raise NoSuchElementException
                            counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # making sure we go back to the listing page (browser back button simulation)
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()

                # comment out
                # break

                # comment out
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.XPATH, value='//a[contains(text(), ">")]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Nemesis forum done.")
# Returns 'True' if the link is a topic link, may need to change for every website
def isDescriptionLink(url):
    if 'post' in url:
        return True
    return False


# Returns True if the link is a listing page link, may need to change for every website
def isListingLink(url):
    if '.onion/n/' in url:
        return True
    return False


# calling the parser to define the links
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return nemesisforums_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing NemesisForums .... DONE!")
@@ -0,0 +1,278 @@
__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import date
from datetime import datetime
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def nemesisforums_description_parser(soup):

    # Fields to be parsed

    topic = "-1"      # 0 *topic name
    user = []         # 1 *all users of each post
    status = []       # 2 all users' authority in each post such as (adm, member, dangerous)
    reputation = []   # 3 all users' karma in each post (usually found as a number)
    interest = []     # 4 all users' interests in each post
    sign = []         # 5 all users' signatures in each post (usually a standard message after the content of the post)
    post = []         # 6 all messages of each post
    feedback = []     # 7 all feedbacks of each vendor (this was found in just one forum and with a number format)
    addDate = []      # 8 all dates of each post
    image_user = []   # 9 all user avatars of each post
    image_post = []   # 10 all first images of each post

    # Finding the topic (should be just one coming from the Listing Page)
    li = soup.find("td", {"class": "thead"}).find('strong')
    topic = li.text
    topic = re.sub(r"\[\w*\]", '', topic)
    topic = topic.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())

    # Finding the repeated tag that corresponds to the listing of posts
    posts = soup.find('table', {"class": "tborder tfixed clear"}).find('td', {"id": "posts_container"}).find_all(
        'div', {"class": "post"})
    # For each message (post), get all the fields we are interested in:
    for ipost in posts:
        if ipost.find('div', {"class": "deleted_post_author"}):
            continue

        # Finding the first level of the HTML page
        post_wrapper = ipost.find('span', {"class": "largetext"})

        # Finding the author (user) of the post
        author = post_wrapper.text.strip()
        user.append(cleanString(author))  # Remember to clean the problematic characters

        # Finding the status of the author
        smalltext = ipost.find('div', {"class": "post_author"})
        if smalltext is not None:
            # CryptBB does have membergroup and postgroup
            membergroup = smalltext.find('div', {"class": "profile-rank"})
            postgroup = smalltext.find('div', {"class": "postgroup"})
            if membergroup != None:
                membergroup = membergroup.text.strip()
                if postgroup != None:
                    postgroup = postgroup.text.strip()
                    membergroup = membergroup + " - " + postgroup
            else:
                if postgroup != None:
                    membergroup = postgroup.text.strip()
                else:
                    membergroup = "-1"
            status.append(cleanString(membergroup))

            # Finding the interest of the author
            # CryptBB does not have blurb
            blurb = smalltext.find('li', {"class": "blurb"})
            if blurb != None:
                blurb = blurb.text.strip()
            else:
                blurb = "-1"
            interest.append(cleanString(blurb))

            # Finding the reputation of the user
            # CryptBB does have reputation
            author_stats = smalltext.find('div', {"class": "author_statistics"})
            karma = author_stats.find('strong')
            if karma != None:
                karma = karma.text
                karma = karma.replace("Community Rating: ", "")
                karma = karma.replace("Karma: ", "")
                karma = karma.strip()
            else:
                karma = "-1"
            reputation.append(cleanString(karma))
        else:
            status.append('-1')
            interest.append('-1')
            reputation.append('-1')
        # Getting here another good tag to find the post date, post content and user's signature
        postarea = ipost.find('div', {"class": "post_content"})

        dt = postarea.find('span', {"class": "post_date"}).text
        # dt = dt.strip().split()
        dt = dt.strip()
        day = date.today()
        if "Today" in dt:
            today = day.strftime('%m-%d-%Y')
            stime = dt.replace('Today,', '').strip()
            date_time_obj = today + ', ' + stime
            date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
        elif "Yesterday" in dt:
            yesterday = day - timedelta(days=1)
            yesterday = yesterday.strftime('%m-%d-%Y')
            stime = dt.replace('Yesterday,', '').strip()
            date_time_obj = yesterday + ', ' + stime
            date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
        elif "ago" in dt:
            # relative timestamps carry the absolute date in the nested span's title attribute
            date_time_obj = postarea.find('span', {"class": "post_date"}).find('span')['title']
            date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
        else:
            date_time_obj = datetime.strptime(dt, '%m-%d-%Y, %I:%M %p')
        addDate.append(date_time_obj)
        # Finding the post
        inner = postarea.find('div', {"class": "post_body scaleimages"})
        quote = inner.find('blockquote')
        if quote is not None:
            quote.decompose()
        inner = inner.text.strip()
        post.append(cleanString(inner))

        # Finding the user's signature
        # signature = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "moderatorbar"}).find('div', {"class": "signature"})
        signature = ipost.find('div', {"class": "signature scaleimages"})
        if signature != None:
            signature = signature.text.strip()
            # print(signature)
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        # As no information about the user's feedback was found, just assign "-1" to the variable
        feedback.append("-1")

        img = ipost.find('div', {"class": "post_body scaleimages"}).find('img')
        if img is not None:
            img = img.get('src').split('base64,')[-1]
        else:
            img = "-1"
        image_post.append(img)

        avatar = ipost.find('div', {"class": "author_avatar"})
        if avatar is not None:
            img = avatar.find('img')
            if img is not None:
                img = img.get('src').split('base64,')[-1]
            else:
                img = "-1"
        else:
            img = "-1"
        image_user.append(img)

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results
    return row
# This is the method to parse the Listing Pages (one page with many topics)
def nemesisforums_listing_parser(soup):

    nm = 0                     # *this variable should receive the number of topics
    forum = "NemesisForums"    # 0 *forum name
    board = "-1"               # 1 *board name (the previous level of the topic in the forum categorization tree.
                               # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []                # 2 *all authors of each topic
    topic = []                 # 3 *all topics
    views = []                 # 4 number of views of each topic
    posts = []                 # 5 number of posts of each topic
    href = []                  # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                               # Listing and Description pages)
    addDate = []               # 7 when the topic was created (difficult to find)
    image_author = []          # 8 all author avatars used in each topic

    # Finding the board (should be just one)
    board = soup.find('span', {"class": "active"}).text
    board = cleanString(board.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find_all('tr', {"class": "inline_row"})

    # Counting how many topics
    nm = len(itopics)

    for itopic in itopics:
        # For each topic found, the structure to get the rest of the information can be of two types.
        # Testing both of them so we don't miss any topic

        # Adding the topic to the topic list
        try:
            topics = itopic.find('span', {"class": "subject_old"}).find('a').text
        except:
            topics = itopic.find('span', {"class": "subject_new"}).find('a').text
        topics = re.sub(r"\[\w*\]", '', topics)
        topic.append(cleanString(topics))

        image_author.append(-1)

        # Adding the url to the list of urls
        try:
            link = itopic.find('span', {"class": "subject_old"}).find('a').get('href')
        except:
            link = itopic.find('span', {"class": "subject_new"}).find('a').get('href')
        href.append(link)

        # Finding the author of the topic
        ps = itopic.find('div', {"class": "author smalltext"}).text
        user = ps.strip()
        author.append(cleanString(user))

        # Finding the number of replies
        columns = itopic.findChildren('td', recursive=False)
        replies = columns[3].text
        if replies == '-':
            posts.append('-1')
        else:
            posts.append(cleanString(replies))

        # Finding the number of Views
        tview = columns[4].text
        if tview == '-':
            views.append('-1')
        else:
            views.append(cleanString(tview))

        # If there is no information about when the topic was added, just assign "-1" to the variable
        addDate.append("-1")

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate, image_author)
def nemesisforums_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.find('div', {"class": "card-body"}).find_all('div', {"class": "d-flex border-2 border-bottom overflow-hidden position-relative px-6 pt-4 pb-3"})

    for a in listing:
        link = a.find('div', {"class": "d-flex align-items-center"}).find('a').get('href')
        href.append(link)

    return href
@@ -0,0 +1,260 @@
__author__ = 'Helium'

'''
NemesisMarket Crawler (Selenium)
Website is very slow
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.NemesisMarket.parser import nemesis_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/market'
def startCrawling():
    mktName = getMKTName()
    # crawling is disabled for now; only the parsing step runs
    # driver = getAccess()
    #
    # if driver != 'down':
    #     try:
    #         login(driver)
    #         crawlForum(driver)
    #     except Exception as e:
    #         print(driver.current_url, e)
    #     closeDriver(driver)

    new_parse(mktName, baseURL, True)
# Returns the name of the website
def getMKTName() -> str:
    name = 'NemesisMarket'
    return name


# Returns the base link of the website
def getFixedURL():
    url = 'http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/market'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
# the driver 'gets' the url, attempting to access the site; if it can't, returns 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


def login(driver):
    # wait for the listing page to show up (this XPath may need to change based on the seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/nav[2]/div/div')))
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return


def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


def getInterestedLinks():
    links = []

    # Ransomware
    links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/ransomware')
    # Malware/botnets
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/malware-botnets')
    # Exploits
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/exploits')
    # DDoS
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/ddos')
    # Spamming/anti-captcha
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/spamming-anti-captcha')
    # Phishing/social engineering
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/phishing-social-engineering')
    # Hackers for hire
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/hackers-for-hire')
    # Scripts and applications
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/scripts-applications')
    # Other
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/items/hacking/other')

    return links
def crawlForum(driver):
    print("Crawling the Nemesis Market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                items = productPages(html)
                for item in items:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out
                    # break

                # comment out
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.XPATH, value='//a[contains(text(), ">")]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Nemesis Market done.")
# Returns 'True' if the link is a product description link, may need to change for every website
def isDescriptionLink(url):
    if 'item' in url and 'items' not in url:
        return True
    return False


# Returns True if the link is a listing page link, may need to change for every website
def isListingLink(url):
    if 'items' in url:
        return True
    return False


def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return nemesis_links_parser(soup)


def crawler():
    startCrawling()
@@ -0,0 +1,338 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# parses description pages; takes the html page of a description page as a soup object, and parses it for the info it needs
# stores the info it needs in different lists; these lists are returned after being organized
# @param: soup object looking at the html page of a description page
# return: 'row' that contains a variety of lists that each hold info on the description page
def nemesis_description_parser(soup):

    # Fields to be parsed

    vendor = "-1"           # 0 *Vendor_Name
    success = "-1"          # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"    # 2 Vendor_Rating
    name = "-1"             # 3 *Product_Name
    describe = "-1"         # 4 Product_Description
    CVE = "-1"              # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"               # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"         # 7 Product_Category
    views = "-1"            # 8 Product_Number_Of_Views
    reviews = "-1"          # 9 Product_Number_Of_Reviews
    rating_item = "-1"      # 10 Product_Rating
    addDate = "-1"          # 11 Product_AddedDate
    BTC = "-1"              # 12 Product_BTC_SellingPrice
    USD = "-1"              # 13 Product_USD_SellingPrice
    EURO = "-1"             # 14 Product_EURO_SellingPrice
    sold = "-1"             # 15 Product_QuantitySold
    left = "-1"             # 16 Product_QuantityLeft
    shipFrom = "-1"         # 17 Product_ShippedFrom
    shipTo = "-1"           # 18 Product_ShippedTo
    image = "-1"            # 19 Product_Image
    vendor_image = "-1"     # 20 Vendor_Image
    # find vendor name
    vendor = soup.find('div', {'class': 'd-flex align-items-center mt-n1'}).find('a').text
    vendor = cleanString(vendor).strip()

    # find product name, stripping special characters (re.sub, since str.replace does not accept a regex)
    name = soup.find('div', {'class': 'd-flex w-100 mt-4'}).find('a').text
    name = re.sub(r'[^a-zA-Z0-9 ]', '', name).strip()
    # find product description (grab the tag first so a missing element doesn't raise on .text)
    describe = soup.find('div', {'class': 'card card-bordered rounded-1 my-6'})\
        .find('div', {'class': 'fs-5 text-gray-800'})
    if describe is not None:
        describe = cleanString(describe.text).strip()
    else:
        describe = soup.find('div', {'class': 'card card-bordered rounded-1 my-6'}) \
            .find('div', {'class': 'fs-5 text-gray-800 mt-5'})
        if describe is not None:
            describe = cleanString(describe.text).strip()
        else:
            describe = "-1"

    # find product category
    temp = soup.find('div', {'class': 'fs-7 py-1'}).findAll('a')
    category = temp[1].text
    if category is not None:
        category = category.strip()
    else:
        category = '-1'
        print('inside category')
    # the vendor/item stats all live in this class
    temp = soup.find('div', {'class': 'ms-4'}).findAll('div')

    # number of reviews
    reviews = temp[1].text
    if reviews is not None:
        reviews = reviews.replace("Reviews: ", "").strip()
    else:
        reviews = '-1'
        print('in rev')

    # item rating out of 5
    rating_item = temp[0].text
    if rating_item is not None:
        rating_item = rating_item.replace("Rating: ", "").replace(" out of 5", "").strip()
    else:
        rating_item = '-1'

    # number of items sold
    success = temp[2].text
    if success is not None:
        success = success.replace('Sales: ', '').strip()
    else:
        success = '-1'
        print('success')
    # price (USD or EUR)
    temp = soup.find('div', {'class': 'text-gray-800 fs-1 fw-bolder mt-6'}).text
    if 'USD' in temp:
        USD = temp.replace('USD', '').strip()
    elif 'EUR' in temp:
        EURO = temp.replace('EUR', '').strip()

    # Finding the Vendor Image
    vendor_image = soup.find('div', {"class": 'col'}).find('img')
    if vendor_image is not None:
        vendor_image = vendor_image.get('src')
        vendor_image = vendor_image.split('base64,')[-1]
    else:
        vendor_image = "-1"

    # Finding the Product Image
    image = soup.find('div', {"class": 'container p-0'}).find('img')
    if image is not None:
        image = image.get('src')
        image = image.split('base64,')[-1]
    else:
        image = "-1"
    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
# parses listing pages; takes the html page of a listing page as a soup object, and parses it for the info it needs
# stores the info it needs in different lists; these lists are returned after being organized
# @param: soup object looking at the html page of a listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def nemesis_listing_parser(soup):

    # Fields to be parsed
    nm = 0                      # *Total_Products (Should be Integer)
    mktName = "NemesisMarket"   # 0 *Marketplace_Name
    vendor = []                 # 1 *Vendor y
    rating_vendor = []          # 2 Vendor_Rating
    success = []                # 3 Vendor_Successful_Transactions
    name = []                   # 4 *Product_Name y
    CVE = []                    # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []                     # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []               # 7 Product_Category y
    describe = []               # 8 Product_Description
    views = []                  # 9 Product_Number_Of_Views
    reviews = []                # 10 Product_Number_Of_Reviews
    rating_item = []            # 11 Product_Rating
    addDate = []                # 12 Product_AddDate
    BTC = []                    # 13 Product_BTC_SellingPrice
    USD = []                    # 14 Product_USD_SellingPrice y
    EURO = []                   # 15 Product_EURO_SellingPrice
    sold = []                   # 16 Product_QuantitySold
    qLeft = []                  # 17 Product_QuantityLeft
    shipFrom = []               # 18 Product_ShippedFrom
    shipTo = []                 # 19 Product_ShippedTo
    image = []                  # 20 Product_Image
    image_vendor = []           # 21 Vendor_Image
    href = []                   # 22 Product_Links
    cat = soup.find('span', {"class": "text-gray-700 fw-bold fs-2"}).text
    cat = cleanString(cat).strip()

    listing = soup.find('div', {"class": 'row g-5 g-xl-5'}).findAll('div', {"class": 'col-sm-6 col-md-4 col-lg-3 col-xxl-2'})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        category.append(cat)

        # Adding the url to the list of urls
        link = a.find('a').get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product name
        product = a.find('div', {"class": 'pt-3 pb-4 px-4'}).find('a').text.strip()
        # product = product.replace(r'[^a-zA-Z0-9]', '').strip()
        name.append(product)

        # Finding the Product Image
        product_image = a.find('img')
        if product_image is not None:
            product_image = product_image.get('src')
            product_image = product_image.split('base64,')[-1]
            image.append(product_image)
        else:
            image.append("-1")

        # Finding Prices
        price = a.find('div', {"class": "fs-4 text-gray-800 fw-bolder mt-3"}).text
        price = price.strip()
        if 'USD' in price:
            price = price.replace('USD', '')
            price = price.strip()
            USD.append(price)
            EURO.append("-1")
        elif 'EUR' in price:
            price = price.replace('EUR', '')
            price = price.strip()
            EURO.append(price)
            USD.append("-1")
        else:
            USD.append("-1")
            EURO.append('-1')
        # Finding the Item Rating
        temp = a.find('div', {"class": "d-flex flex-column"}).findAll('span', {"class": "badge badge-sm badge-light-dark text-gray-800 rounded-1"})
        rating = temp[0].text.strip()
        if rating is not None:
            rating_item.append(rating)
        else:
            rating_item.append('-1')
            print('rating')

        # Finding the number of reviews
        rev = temp[1].text.strip()
        if rev is not None:
            reviews.append(rev)
        else:
            reviews.append('-1')
            print('reviews')

        # Finding the number of sales
        sales = temp[2].text.strip()
        if sales is not None:
            success.append(sales)
        else:
            success.append('-1')
            print('success')

        # Finding the Vendor
        vendor_name = a.find('div', {"class": "d-flex flex-column"}).find('a').text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the Vendor Image
        vendor_image = a.find('div', {"class": 'd-flex align-items-center'}).find('a').find('img')
        if vendor_image is not None:
            vendor_image = vendor_image.get('src')
            vendor_image = vendor_image.split('base64,')[-1]
            image_vendor.append(vendor_image)
        else:
            image_vendor.append("-1")

        # Finding the item description
        description = a.find('div', {"class": 'fs-5 text-gray-800 mt-2'}).text
        if description is not None:
            description = description.replace('\n', ' ')
            describe.append(description)
        else:
            describe.append('-1')
            print('describe')
        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

        # fields not found on Nemesis listing pages: assign "-1" to all of them
        val = "-1"
        sold.append(val)
        views.append(val)
        addDate.append(val)
        BTC.append(val)
        qLeft.append(val)
        shipFrom.append(val)
        shipTo.append(val)
        rating_vendor.append(val)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
# called by the crawler to get the description links on a listing page
# @param: BeautifulSoup object of the correct html page (listing page)
# return: list of description links from a listing page
def nemesis_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('div', {"class": "col-sm-6 col-md-4 col-lg-3 col-xxl-2"})

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

    return href