|
__author__ = 'Helium'

'''
NexusMarket Marketplace Crawler (Selenium)

Not complete:
need to go through multiple pages...
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.NexusMarket.parser import nexus_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://nexusabcdkq4pdlubs6wk6ad7pobuupzoomoxi6p7l32ci4vjtb2z7yd.onion/'


# Opens Tor Browser, crawls the website, then parses, then closes Tor
# acts like the main method for the crawler; crawler() at the end of this file calls this function
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
# return: name of site in string type
def getMKTName():
    name = 'NexusMarket'
    return name


# Returns the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://nexusabcdkq4pdlubs6wk6ad7pobuupzoomoxi6p7l32ci4vjtb2z7yd.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver


# the driver 'gets' the url and attempts to access the site; if it can't, return 'down'
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Manual captcha solver: waits for a specific element so that the whole page loads, finds the input boxes,
# then allows for manual solving of the captcha in the browser while the crawler waits on the terminal
# @param: current selenium web driver
def login(driver):
    input("Press ENTER when CAPTCHA is complete and login page has loaded\n")

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//input[@id="username"]')
    # Username here
    usernameBox.send_keys('findingmykeys')
    passwordBox = driver.find_element(by=By.XPATH, value='//input[@id="inputPassword3"]')
    # Password here
    passwordBox.send_keys('ican’tFindMycarKey$')

    input("Press ENTER when CAPTCHA is completed and you exit the newsletter\n")

    # wait for the listing page to show up (this XPath may need to change based on different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="collapse3"]')))


# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
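# For illustration (layout derived from the path building above; shared_folder comes from config):
# <shared_folder>/MarketPlaces/NexusMarket/HTML_Pages/<CURRENT_DATE>/Listing/<cleaned-url>.html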
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Creates the file name from the passed URL; gives a distinct counter-based name if nothing is left after cleaning
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
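# For illustration (hypothetical URL): getNameFromURL('http://example.onion/?sub_id=15')
# would return 'httpexampleonionsubid15'; a URL with no alphanumeric characters falls back to the counter.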
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# returns a list of urls of interest; the crawler runs through this list
# in this example, each link is a product category of the market, such as
# malware, ddos, software, botnet, and hacking services
def getInterestedLinks():
    links = []

    # # Hire hacker
    # links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=3')
    # # other
    # links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=14')
    # malware
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=15')
    # ddos
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=16')
    # software
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=17')
    # botnet
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=18')
    # hacking service
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=31')

    return links


# gets the links of interest to crawl through, iterates through the list, where each link is clicked and crawled
# listing and description pages are crawled through here, and both types of pages are saved
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the NexusMarket market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, linksToCrawl[i] + f"page{count+1}")

                productList = productPages(html)
                for item in productList:
                    # resolve the (possibly relative) product link against the base url
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # # comment out
                    # break

                # # comment out
                # if count == 1:
                #     break

                try:
                    link = driver.find_element(by=By.XPATH, value='//a[@rel="next"]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the NexusMarket market done.")


# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'single_product' in url:
        return True
    return False


# Returns True if the link is a listingPage link
# @param: url of any url crawled
# return: true if is a Listing page, false if not
def isListingLink(url):
    if 'sub_id' in url:
        return True
    return False


# calls the parser to extract the product links from the html of a listing page
# @param: html of a page from the interested link list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return nexus_links_parser(soup)


# Drop links that "signout"
# def isSignOut(url):
#     #absURL = urlparse.urljoin(url.base_url, url.url)
#     if 'signout' in url.lower() or 'logout' in url.lower():
#         return True
#
#     return False


def crawler():
    startCrawling()
    # print("Crawling and Parsing NexusMarket .... DONE!")