__author__ = 'Helium'

'''
Anon Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.AnonMarket.parser import AnonMarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion'


# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; crawler() at the end of this module calls it.
def startCrawling():
    opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, baseURL, True)


# Opens Tor Browser and prompts for ENTER input before continuing
def opentor():
    from MarketPlaces.Initialization.markets_mining import config
    global pid

    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Returns the name of the marketplace
# return: name of the site as a string
def getMKTName():
    name = 'AnonMarket'
    return name


# Returns the base link of the marketplace
# return: URL of the base site as a string
def getFixedURL():
    url = 'http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion'
    return url


# Closes Tor Browser
# @param: current Selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates the Firefox 'driver' and configures its 'Profile'
# to route all traffic through the Tor SOCKS proxy
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 2)  # do not load images
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    # SOCKS5 proxy on 127.0.0.1:9150 is Tor Browser's default;
    # socks_remote_dns makes .onion names resolve through Tor as well
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver
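
# Note: newer Selenium releases (4.10+) removed the 'firefox_binary' and
# 'firefox_profile' keyword arguments used above. A minimal sketch of an
# equivalent driver factory built on Options follows, assuming the same
# config keys; the function name is illustrative and not part of the
# original module.
def createFFDriverWithOptions():
    from selenium.webdriver.firefox.options import Options
    from MarketPlaces.Initialization.markets_mining import config

    options = Options()
    options.binary_location = config.get('TOR', 'firefox_binary_path')
    # Preferences set on Options reach the session the same way the
    # FirefoxProfile preferences do above (only the proxy-related ones
    # are repeated here for brevity)
    options.set_preference('network.proxy.type', 1)
    options.set_preference('network.proxy.socks_version', 5)
    options.set_preference('network.proxy.socks', '127.0.0.1')
    options.set_preference('network.proxy.socks_port', 9150)
    options.set_preference('network.proxy.socks_remote_dns', True)
    options.set_preference('javascript.enabled', False)

    service = Service(config.get('TOR', 'geckodriver_path'))
    return webdriver.Firefox(service=service, options=options)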
# The driver 'gets' the url, attempting to access the site;
# returns the string 'down' if the site cannot be reached
# return: the Selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves a cleaned copy of the given page to disk
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url of the page being crawled
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath


# Creates the file name from the passed URL; falls back to a global counter
# when stripping non-alphanumeric characters leaves an empty name
# @param: raw url of the page being crawled
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


# Returns the list of URLs of interest that the crawler runs through.
# Each entry is a product-category listing page of the market (Software,
# Malware, Bootkits, Carding, and so on); uncomment a line to include
# that category in the crawl. A less repetitive way to build this list
# is sketched after the function.
def getInterestedLinks():
    links = []

    # Software
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/civil_softwares')
    # Malware
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/malware')
    # Bootkits
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/bootkits')
    # Backdoors
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/backdoors')
    # Keyloggers
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/keyloggers')
    # Wireless Trackers
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/wireless_trackers')
    # Screen Scrapers
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/screen_scrapers')
    # Mobile Forensic Tools
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/mobile_forensics_tools')
    # Wifi Jammers
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/wifi_jammers')
    # Carding
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/carding')
    # Worms
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/worms')
    # Viruses
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/viruses')
    # Trojans
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/trojans')
    # Botnets
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/botnets')
    # Security Technology
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/security_technology')
    # Hacks
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/hacks')
    # Exploit kits
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/exploit_kit')
    # Security
    # links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/security')

    return links
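
# The category URLs above differ only in their final path segment. A more
# compact equivalent could build them from baseURL; this is a sketch, and
# the helper name and 'slugs' parameter are illustrative, not part of the
# original module.
def buildCategoryLinks(slugs):
    return [urlparse.urljoin(baseURL, '/category/' + slug) for slug in slugs]

# Example: buildCategoryLinks(['malware']) reproduces the single active link above.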
# Iterates through the list of links of interest: each listing page is opened,
# every product (description) page on it is visited, and both page types are saved
# @param: Selenium driver
def crawlForum(driver):
    print("Crawling Anon Market")

    linksToCrawl = getInterestedLinks()

    for link in linksToCrawl:
        print('Crawling :', link)

        has_next_page = True
        while has_next_page:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(driver, html, link)

            # Get all product links on the current page
            products_list = productPages(html)
            for item in products_list:
                itemURL = urlparse.urljoin(baseURL, str(item))
                try:
                    driver.get(itemURL)
                except:
                    driver.refresh()
                savePage(driver, driver.page_source, item)
                driver.back()  # Go back to the listing after visiting each product

            # Find the active page marker and, from it, the next page link;
            # if either is missing, this category has no further pages
            try:
                active_page_element = driver.find_element(By.XPATH, '//div[@class="page activepage"]')
                current_page = int(active_page_element.text)  # informational only
                next_page_element = active_page_element.find_element(By.XPATH, 'following-sibling::a[1]')
                link = next_page_element.get_attribute('href')
            except NoSuchElementException:
                has_next_page = False

    print("Crawling Anon Market done.")


# Returns True if the link is a description (product) page
# @param: url of any page crawled
# return: True if it is a description page, False if not
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns True if the link is a listing page
# @param: url of any page crawled
# return: True if it is a listing page, False if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False


# Calls the parser to extract the description links from a listing page
# @param: html of a listing page reached from getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return AnonMarket_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing AnonMarket .... DONE!")
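
# A minimal standalone entry point. This is an assumption: the framework's
# Initialization scripts normally invoke crawler() themselves, so this guard
# only matters when the module is run directly.
if __name__ == '__main__':
    crawler()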