__author__ = 'DarkWeb'

'''
Ares Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, time
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Ares.parser import ares_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion'


# Opens Tor Browser and crawls the website
def startCrawling():
    marketName = getMarketName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(marketName, False)


# Logs in with premade account credentials; the login CAPTCHA is solved manually
def login(driver):
    # wait for the login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[3]/div[3]/div[2]/div/div[2]/div/center")))

    # enter the username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # username here
    usernameBox.send_keys('blabri')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # password here
    passwordBox.send_keys('fishowal')

    '''
    # wait for the CAPTCHA page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[3]/div[3]/div[2]/div/div[2]/div/form/div/div[3]/div/div/img")))

    # save the CAPTCHA image locally
    driver.find_element(by=By.XPATH,
                        value='/html/body/div[3]/div[3]/div[2]/div/div[2]/div/form/div/div[3]/div/div/img').screenshot(
        r'..\Ares\captcha.png')

    # this method will show the image in any image viewer
    im = Image.open(r'..\Ares\captcha.png')
    im.show()

    # wait until the input box shows up
    inputBox = driver.find_element(by=By.XPATH,
                                   value='/html/body/div[3]/div[3]/div[2]/div/div[2]/div/form/div/div[3]/input')

    # ask the user to type the CAPTCHA solution into the terminal
    userIn = input("Enter solution: ")

    # send the user's solution into the input box
    inputBox.send_keys(userIn)

    # click the verify (submit) button
    driver.find_element(by=By.XPATH,
                        value="/html/body/div[3]/div[3]/div[2]/div/div[2]/div/form/div/div[4]/div/div/button").click()
    '''

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[7]/div[3]/div[2]/div[1]/div[1]')))


# Returns the name of the website
def getMarketName():
    name = 'Ares'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.quit()
    time.sleep(3)
    return


# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # clear history, credentials and site data on shutdown so nothing persists
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # restrict image loading to reduce bandwidth
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    # route all traffic through the local Tor SOCKS5 proxy, resolving DNS remotely
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
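
# NOTE (sketch): recent Selenium 4 releases deprecate, and the newest remove,
# the firefox_binary/firefox_profile keyword arguments used above. The function
# below is a minimal, untested alternative using FirefoxOptions; the name
# createFFDriverOptions is hypothetical and nothing in this file calls it. It
# assumes the same 'TOR' config keys; only the proxy-related preferences are
# carried over, and the rest of the preferences from createFFDriver() could be
# set the same way via options.set_preference().
def createFFDriverOptions():
    from MarketPlaces.Initialization.markets_mining import config
    from selenium.webdriver.firefox.options import Options

    options = Options()
    options.binary_location = config.get('TOR', 'firefox_binary_path')
    # route all traffic through the local Tor SOCKS5 proxy, resolving DNS remotely
    options.set_preference('network.proxy.type', 1)
    options.set_preference('network.proxy.socks_version', 5)
    options.set_preference('network.proxy.socks', '127.0.0.1')
    options.set_preference('network.proxy.socks_port', 9150)
    options.set_preference('network.proxy.socks_remote_dns', True)
    options.set_preference('javascript.enabled', False)

    service = Service(config.get('TOR', 'geckodriver_path'))
    driver = webdriver.Firefox(service=service, options=options)
    driver.maximize_window()
    return driver
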
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMarketName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


# Returns the list of category pages to crawl; all but 'Digital - Malware'
# are currently commented out
def getInterestedLinks():
    links = []

    # # Digital - Other
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/91ecd5d0-002c-11ec-9b46-ede2378c5d3c')
    # # Digital - VPN
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/9431b830-002b-11ec-86d6-cdaf65cd97f1')
    # # Digital - Coding
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/948b7400-a939-11ec-adc5-2f775203130c')
    # Digital - Malware
    links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/95c37970-002c-11ec-a5dc-1f4432087ed2')
    # # Digital - Guides
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/9a8bea70-002b-11ec-a3db-c90dd329f662')
    # # Digital - Hacking
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/a81693f0-002b-11ec-9c39-110550ce4921')
    # # Digital - Malware
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/b3258c50-002b-11ec-b658-876d3d651145')
    # # Digital - Services
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/bae64840-002b-11ec-bbcc-a93431540099')
    # # Digital - Software
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/cff75df0-002b-11ec-8d0a-81fddeb36bf1')
    # # Digital - Exploits
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/ef029550-002f-11ec-8711-675a8b116ba6')
    # # Digital - Tutorials
    # links.append('http://sn2sfdqay6cxztroslaxa36covrhoowe6a5xug6wlm6ek7nmeiujgvad.onion/category/f6e9c3b0-002b-11ec-85aa-c79a6ac8cfe8')

    return links
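
# A minimal, hypothetical helper sketching a bounded-retry alternative to the
# bare "try: driver.get(...) / except: driver.refresh()" pattern used in
# crawlForum() below. Nothing in this file calls it; the retry count and the
# 5-second back-off are assumptions, not values taken from the original code.
def loadPageWithRetry(driver, url, retries=2):
    for attempt in range(retries + 1):
        try:
            driver.get(url)
            return True
        except Exception:
            # give the Tor circuit a moment to recover before retrying
            time.sleep(5)
    return False
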
def crawlForum(driver):
    print("Crawling the Ares market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productLinks = productPages(html)
                for item in productLinks:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment this 'break' out to crawl every product on the page
                    break

                # comment this block out to crawl more than two listing pages
                if count == 1:
                    break

                try:
                    nav = driver.find_element(by=By.XPATH, value='/html/body/div[7]/div[3]/div/div[2]/nav')
                    a = nav.find_element(by=By.LINK_TEXT, value="Next")
                    link = a.get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    input("Crawling Ares market done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a product description link
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
def isListingLink(url):
    if 'category' in url:
        return True
    return False


# calls the parser to extract the product links from a listing page
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return ares_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Ares .... DONE!")
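
# A minimal usage sketch, assuming this module is run directly rather than
# driven by the MarketPlaces.Initialization pipeline it imports from.
if __name__ == '__main__':
    crawler()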