- __author__ = 'DarkWeb'
-
- '''
- ZeroDay Marketplace Crawler (Selenium)
- '''
-
- from selenium import webdriver
- from selenium.common.exceptions import NoSuchElementException
- from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
- from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
- from selenium.webdriver.firefox.service import Service
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.common.by import By
-
- import urllib.parse as urlparse
- import os, time
- from bs4 import BeautifulSoup
- from MarketPlaces.Initialization.prepare_parser import new_parse
- from MarketPlaces.ZeroDay.parser import zeroday_links_parser
- from MarketPlaces.Utilities.utilities import cleanHTML
-
- counter = 1
- baseURL = 'http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/'
-
-
- def startCrawling():
- mktName = getMKTName()
- driver = getAccess()
-
- if driver != 'down':
- try:
- # login(driver)
- agreeToTerms(driver)
- crawlForum(driver)
- except Exception as e:
- print(driver.current_url, e)
- closeDriver(driver)
-
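-     # parse the pages that were saved during the crawl (see prepare_parser.new_parse)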
- new_parse(mktName, baseURL, True)
-
-
- # Returns the name of the website
- def getMKTName() -> str:
- name = 'ZeroDay'
- return name
-
-
- # Returns the base link of the website
- def getFixedURL():
- url = 'http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/'
- return url
-
-
- # Closes Tor Browser
- def closeDriver(driver):
- # global pid
- # os.system("taskkill /pid " + str(pro.pid))
- # os.system("taskkill /t /f /im tor.exe")
- print('Closing Tor...')
- driver.close()
- time.sleep(3)
- return
-
-
- # Creates the Firefox 'driver' and configures its 'Profile'
- # to use the Tor proxy and socket
- def createFFDriver():
- from MarketPlaces.Initialization.markets_mining import config
-
- ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
-
- ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
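-     # privacy hardening: disable history, saved logins, prefetching and referers,
-     # and clear cookies and site data on shutdown to minimize the browser's footprint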
- ff_prof.set_preference("places.history.enabled", False)
- ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
- ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
- ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
- ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
- ff_prof.set_preference("signon.rememberSignons", False)
- ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
- ff_prof.set_preference("network.dns.disablePrefetch", True)
- ff_prof.set_preference("network.http.sendRefererHeader", 0)
- ff_prof.set_preference("permissions.default.image", 3)
- ff_prof.set_preference("browser.download.folderList", 2)
- ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
- ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
- ff_prof.set_preference('network.proxy.type', 1)
- ff_prof.set_preference("network.proxy.socks_version", 5)
- ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
- ff_prof.set_preference('network.proxy.socks_port', 9150)
- ff_prof.set_preference('network.proxy.socks_remote_dns', True)
- ff_prof.set_preference("javascript.enabled", False)
- ff_prof.update_preferences()
-
- service = Service(config.get('TOR', 'geckodriver_path'))
-
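-     # NOTE: the firefox_binary/firefox_profile keyword arguments are deprecated in
-     # Selenium 4 and removed in later releases, where both must be set on an Options object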
- driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
-
- driver.maximize_window()
-
- return driver
-
-
- # The driver 'gets' the url; returns 'down' if the site can't be accessed
- def getAccess():
- url = getFixedURL()
- driver = createFFDriver()
- try:
- driver.get(url)
- return driver
-     except Exception:
- driver.close()
- return 'down'
-
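- # Clicks through the terms-of-service prompt shown when first entering the site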
- def agreeToTerms(driver):
- try:
- agree_button = driver.find_element(by=By.XPATH, value='//input[@type="submit" and @name="agree" and @value="Yes, I agree"]')
- agree_button.click()
- except Exception as e:
- print('Problem with clicking agree button', e)
-
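- # Logs in to the marketplace; currently unused, as the login(driver) call in startCrawling is commented out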
- def login(driver):
- # input("Press ENTER when CAPTCHA is complete and login page has loaded\n")
- WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
- (By.XPATH, '//*[@id="username"]')))
-
- # entering username and password into input boxes
- usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
- # Username here
- usernameBox.send_keys('blabri')
- passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
- # Password here
- passwordBox.send_keys('fishowal')
-
- input("Press ENTER when BROKEN CIRCLE is pressed\n")
-
-     # wait for the listing page to show up (this XPath may need to change for a different seed URL)
- WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
- (By.XPATH, '/html/body/div[6]/div[3]/div[2]/div[1]/div[1]')))
-
-
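- # Saves the cleaned HTML of a crawled page under the shared folder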
- def savePage(driver, page, url):
- cleanPage = cleanHTML(driver, page)
- filePath = getFullPathName(url)
- os.makedirs(os.path.dirname(filePath), exist_ok=True)
-     with open(filePath, 'wb') as file:
-         file.write(cleanPage.encode('utf-8'))
- return
-
-
- def getFullPathName(url):
- from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
-
- mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
- fileName = getNameFromURL(url)
- if isDescriptionLink(url):
-         fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
-     else:
-         fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
- return fullPath
-
-
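- # Derives a file name from the URL by keeping only alphanumeric characters;
- # falls back to a global counter when that yields an empty name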
- def getNameFromURL(url):
- global counter
- name = ''.join(e for e in url if e.isalnum())
- if name == '':
- name = str(counter)
- counter = counter + 1
- return name
-
-
- def getInterestedLinks():
- links = []
-
- # Private category sells private exploits and vulnerabilities
-
- # Remote exploits
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/remote')
- # Local exploits
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/local')
- # Web App exploits
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/webapps')
-     # DoS/PoC - denial of service / proof of concept
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/dos')
-
- # Remote
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/remote')
- # Local
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/local')
- # Web app
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/webapps')
- # dos/poc
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/dos')
- # Shellcode
- links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/shellcode')
-
- return links
-
-
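- # Visits each category listing, saves every listing page and the product pages it
- # links to, then follows the "next" pagination link until none remains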
- def crawlForum(driver):
-
- print("Crawling the ZeroDay market")
-
- linksToCrawl = getInterestedLinks()
-
- i = 0
- while i < len(linksToCrawl):
- link = linksToCrawl[i]
- print('Crawling :', link)
- try:
- has_next_page = True
- count = 0
-
- while has_next_page:
- try:
- driver.get(link)
-                 except Exception:
- driver.refresh()
- html = driver.page_source
- savePage(driver, html, link)
-                 links = productPages(html)
-
-                 for item in links:
- itemURL = urlparse.urljoin(baseURL, str(item))
- try:
- driver.get(itemURL)
-                     except Exception:
- driver.refresh()
- savePage(driver, driver.page_source, item)
- driver.back()
-
-                     # uncomment to stop after the first product page (testing only)
-                     # break
-
-                 # uncomment to stop after the second listing page (testing only)
-                 # if count == 1:
-                 #     break
-
- try:
- link = driver.find_element(by=By.XPATH, value='//a[contains(text(), "next")]').get_attribute('href')
- if link == "":
- raise NoSuchElementException
- count += 1
-
- except NoSuchElementException:
- has_next_page = False
-
- except Exception as e:
- print(link, e)
- i += 1
-
- print("Crawling the Ares market done.")
-
-
- # Returns True if the link is a description (product) page link; may need to change for every website
- def isDescriptionLink(url):
- if 'description' in url:
- return True
- return False
-
-
- # Returns True if the link is a listing page link; may need to change for every website
- def isListingLink(url):
- if 'category' in url:
- return True
- return False
-
-
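- # Extracts the product links from a listing page's HTML via the ZeroDay parser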
- def productPages(html):
- soup = BeautifulSoup(html, "html.parser")
- return zeroday_links_parser(soup)
-
-
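- # Module entry point; presumably invoked by the mining framework, matching the other market crawlers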
- def crawler():
- startCrawling()