
Merge branch '0day' into 'main'

0day

See merge request dw9372422/dw_pipeline_test!13
westernmeadow 10 months ago
commit d1767b560e
5 changed files with 547 additions and 1 deletion:

  1. MarketPlaces/Initialization/marketsList.txt (+1, -1)
  2. MarketPlaces/Initialization/markets_mining.py (+3, -0)
  3. MarketPlaces/Initialization/prepare_parser.py (+5, -0)
  4. MarketPlaces/ZeroDay/crawler_selenium.py (+284, -0)
  5. MarketPlaces/ZeroDay/parser.py (+254, -0)

MarketPlaces/Initialization/marketsList.txt (+1, -1)

@@ -1 +1 @@
-GoFish
+ZeroDay

MarketPlaces/Initialization/markets_mining.py (+3, -0)

@@ -23,6 +23,7 @@ from MarketPlaces.Ares.crawler_selenium import crawler as crawlerAres
 from MarketPlaces.Bohemia.crawler_selenium import crawler as crawlerBohemia
 from MarketPlaces.TheDarkMarket.crawler_selenium import crawler as crawlerTheDarkMarket
 from MarketPlaces.GoFish.crawler_selenium import crawler as crawlerGoFish
+from MarketPlaces.ZeroDay.crawler_selenium import crawler as crawlerZeroDay
 from MarketPlaces.Torzon.crawler_selenium import crawler as crawlerTorzon
 import configparser
@@ -133,6 +134,8 @@ if __name__ == '__main__':
             crawlerGoFish()
         elif mkt == "TheDarkMarket":
             crawlerTheDarkMarket()
+        elif mkt == "ZeroDay":
+            crawlerZeroDay()
         elif mkt == "Torzon":
             crawlerTorzon()


MarketPlaces/Initialization/prepare_parser.py (+5, -0)

@@ -26,6 +26,7 @@ from MarketPlaces.CypherMarketplace.parser import *
 from MarketPlaces.WeTheNorth.parser import *
 from MarketPlaces.Torzon.parser import *
 from MarketPlaces.GoFish.parser import *
+from MarketPlaces.ZeroDay.parser import *
 from MarketPlaces.Classifier.classify_product import predict
@@ -161,6 +162,8 @@ def parse_listing(marketPlace, listingFile, soup, createLog, logFile):
         rw = wethenorth_listing_parser(soup)
     elif marketPlace == "GoFish":
         rw = gofish_listing_parser(soup)
+    elif marketPlace == "ZeroDay":
+        rw = zeroday_listing_parser(soup)
     elif marketPlace == "Torzon":
         rw = torzon_listing_parser(soup)
     else:
@@ -217,6 +220,8 @@ def parse_description(marketPlace, descriptionFile, soup, createLog, logFile):
         rmm = wethenorth_description_parser(soup)
     elif marketPlace == "GoFish":
         rmm = gofish_description_parser(soup)
+    elif marketPlace == "ZeroDay":
+        rmm = zeroday_description_parser(soup)
     elif marketPlace == "Torzon":
         rmm = torzon_description_parser(soup)
     else:


MarketPlaces/ZeroDay/crawler_selenium.py (+284, -0)

@@ -0,0 +1,284 @@
__author__ = 'DarkWeb'

'''
ZeroDay Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.ZeroDay.parser import zeroday_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/'
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            # login(driver)
            agreeToTerms(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
def getMKTName():
    name = 'ZeroDay'
    return name


# Returns the base link of the website
def getFixedURL():
    url = 'http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
# The driver 'gets' the url and attempts to access the site; returns 'down' if it can't
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
def agreeToTerms(driver):
    try:
        agree_button = driver.find_element(by=By.XPATH, value='//input[@type="submit" and @name="agree" and @value="Yes, I agree"]')
        agree_button.click()
    except Exception as e:
        print('Problem with clicking agree button', e)


def login(driver):
    # input("Press ENTER when CAPTCHA is complete and login page has loaded\n")

    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="username"]')))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('blabri')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('fishowal')

    input("Press ENTER when BROKEN CIRCLE is pressed\n")

    # wait for listing page show up (This Xpath may need to change based on different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[6]/div[3]/div[2]/div[1]/div[1]')))
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
def getInterestedLinks():
    links = []

    # The Private category sells private exploits and vulnerabilities
    # Remote exploits
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/remote')
    # Local exploits
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/local')
    # Web App exploits
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/webapps')
    # dos/poc - denial of service / proof of concept
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/private/dos')
    # Remote
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/remote')
    # Local
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/local')
    # Web app
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/webapps')
    # dos/poc
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/dos')
    # Shellcode
    links.append('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/shellcode')

    return links
def crawlForum(driver):
    print("Crawling the ZeroDay market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                products = productPages(html)
                for item in products:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # # comment out
                    # break

                # comment out
                # if count == 1:
                #     break

                try:
                    link = driver.find_element(by=By.XPATH, value='//a[contains(text(), "next")]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the ZeroDay market done.")
# Returns True if the link is a description link; may need to change for every website
def isDescriptionLink(url):
    if 'description' in url:
        return True
    return False


# Returns True if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if 'category' in url:
        return True
    return False


def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return zeroday_links_parser(soup)


def crawler():
    startCrawling()
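
For reference, the Tor wiring above can be exercised in isolation. The sketch below mirrors the proxy preferences set in createFFDriver(), using the same Selenium keyword style as the file itself, but the three filesystem paths are placeholders: in the pipeline they are read from the [TOR] section of the project config. It assumes Tor Browser is running locally so its SOCKS5 proxy is listening on 127.0.0.1:9150.

# A minimal standalone sketch of the Tor proxy setup used by createFFDriver().
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.service import Service

ff_binary = FirefoxBinary('/path/to/tor-browser/Browser/firefox')   # placeholder
ff_prof = FirefoxProfile('/path/to/tor-browser/profile.default')    # placeholder

ff_prof.set_preference('network.proxy.type', 1)             # manual proxy settings
ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
ff_prof.set_preference('network.proxy.socks_port', 9150)    # Tor Browser's default SOCKS port
ff_prof.set_preference('network.proxy.socks_remote_dns', True)  # resolve .onion names inside Tor
ff_prof.update_preferences()

driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof,
                           service=Service('/path/to/geckodriver'))  # placeholder
driver.get('http://sq542reyqwagfkghieehykb6hh6ohku5irarrrbeeo5iyozdbhe5n3id.onion/')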

MarketPlaces/ZeroDay/parser.py (+254, -0)

@@ -0,0 +1,254 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
import re


# Parses description pages: takes the soup object of a description page's HTML
# and extracts the info it needs, organized into a single row of fields
# @param: soup object looking at the html page of a description page
# return: 'row' that contains all the fields scraped from the description page
def zeroday_description_parser(soup):
    # Fields to be parsed
    vendor = "-1"  # 0 *Vendor_Name y
    success = "-1"  # 1 Vendor_Successful_Transactions n
    rating_vendor = "-1"  # 2 Vendor_Rating y
    name = "-1"  # 3 *Product_Name y
    describe = "-1"  # 4 Product_Description y
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"  # 7 Product_Category y
    views = "-1"  # 8 Product_Number_Of_Views y
    reviews = "-1"  # 9 Product_Number_Of_Reviews n
    rating_item = "-1"  # 10 Product_Rating n
    addDate = "-1"  # 11 Product_AddedDate y
    BTC = "-1"  # 12 Product_BTC_SellingPrice y
    USD = "-1"  # 13 Product_USD_SellingPrice y
    EURO = "-1"  # 14 Product_EURO_SellingPrice n
    sold = "-1"  # 15 Product_QuantitySold n
    left = "-1"  # 16 Product_QuantityLeft n
    shipFrom = "-1"  # 17 Product_ShippedFrom n
    shipTo = "-1"  # 18 Product_ShippedTo n
    image = "-1"  # 19 Product_Image n
    vendor_image = "-1"  # 20 Vendor_Image n

    # Finding Vendor
    div_vendor = soup.find('div', {'class': "exploit_view_table_user_content"})
    vendor = div_vendor.find('a').text
    vendor = cleanString(vendor).strip()

    # Finding Vendor Rating (a bug in their system shows the standard rating)
    div_rating = div_vendor.find_next_sibling('div')
    rating_vendor = div_rating.find_all('div', {'class': "td"})[1].text
    rating_vendor = cleanNumbers(rating_vendor).strip()

    # Finding Product Name
    div_name = soup.find('div', {'class': "exploit_title"})
    name = div_name.find('h1', {'class': "YellowText"}).text
    name = cleanString(name).strip()

    # Finding Product description
    describe = soup.find('meta', attrs={'name': "description"}).get("content")
    describe = cleanString(describe).strip()

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
            CVE = CVE.replace('\t', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Finding category
    div_category = soup.find('div', {'class': "td"}, text="Category").find_next_sibling('div', {'class': "td"})
    category = div_category.text
    category = cleanString(category).strip()

    # Finding views
    div_views = soup.find('div', {'class': "td"}, text="Views").find_next_sibling('div', {'class': "td"})
    views = div_views.text.replace(' ', '').strip()

    # Finding added date
    div_date = soup.find('div', {'class': 'td'}, text='Date add').find_next_sibling('div', {'class': "td"})
    addDate = div_date.text.strip()
    # addDate = datetime.strptime(addDate, "%d-%m-%Y")

    # Finding BTC and USD/GOLD
    div_price = soup.find('div', {'class': "td"}, text="Price")
    price = div_price.find_next_sibling('div', {'class': "td"}).text.strip()

    if "free" in price.lower():
        BTC = 0
        USD = 0
    else:
        price = ''.join(price.split())
        index = price.index('BTC')
        BTC = price[:index]
        USD = price[index + 3:].replace('USD', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
# Parses listing pages: takes the soup object of a listing page's HTML and
# parses each product entry into per-field lists, which are returned after being organized
# @param: soup object looking at the html page of a listing page
# return: 'row' that contains a variety of lists, each holding info on every product listed
def zeroday_listing_parser(soup):
    # Fields to be parsed
    nm = 0  # *Total_Products (Should be Integer)
    mktName = "ZeroDay"  # 0 *Marketplace_Name y
    vendor = []  # 1 *Vendor y
    rating_vendor = []  # 2 Vendor_Rating y
    success = []  # 3 Vendor_Successful_Transactions n
    name = []  # 4 *Product_Name y
    CVE = []  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []  # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []  # 7 Product_Category y
    describe = []  # 8 Product_Description n
    views = []  # 9 Product_Number_Of_Views y
    reviews = []  # 10 Product_Number_Of_Reviews n
    rating_item = []  # 11 Product_Rating n
    addDate = []  # 12 Product_AddDate y
    BTC = []  # 13 Product_BTC_SellingPrice y
    USD = []  # 14 Product_USD_SellingPrice y
    EURO = []  # 15 Product_EURO_SellingPrice n
    sold = []  # 16 Product_QuantitySold n
    qLeft = []  # 17 Product_QuantityLeft n
    shipFrom = []  # 18 Product_ShippedFrom n
    shipTo = []  # 19 Product_ShippedTo n
    image = []  # 20 Product_Image n
    image_vendor = []  # 21 Vendor_Image n
    href = []  # 22 Product_Links y

    listings = soup.findAll('div', {"class": "ExploitTableContent"})

    # Populating the Number of Products
    nm = len(listings)

    for listing in listings:
        # Finding the vendor name
        vendor_name = listing.find('a', href=lambda href: href and '/author/' in href).text
        vendor_name = cleanString(vendor_name).strip()
        vendor.append(vendor_name)

        # Finding the vendor rating
        vendor_level_div = listing.find('div', {'class': "tips_bl"})
        vendor_level = vendor_level_div.find('div', {'class': "tips_value_big"}).text
        vendor_level = cleanNumbers(vendor_level)
        rating_vendor.append(vendor_level)

        # Finding the product name
        product_name = listing.find('h3').text
        product_name = cleanString(product_name).strip()
        name.append(product_name)

        # Searching for CVE and MS categories
        cve = listing.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
                cee = cee.replace('\t', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = listing.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

        # Finding the category
        category_text = listing.find_all('div', {'class': "td"})[2].text
        category_text = cleanString(category_text).strip()
        category.append(category_text)

        # Finding the hrefs
        description_link = listing.find('h3').find('a')['href']
        href.append(description_link)

        # Finding the views
        views_text = listing.find_all('div', {'class': "td"})[3].text.replace(' ', '').strip()
        views.append(views_text)

        # Finding the date added
        date = listing.find('div', {'class': "td"}).find('a').text.strip()
        # date = datetime.strptime(date, "%d-%m-%Y")
        addDate.append(date)

        # Finding the BTC and USD/GOLD
        btc_listing = listing.find('div', {"class": 'tips_price_btc'})
        if btc_listing:
            btc_price = btc_listing.text.strip().replace('Open this exploit for ', '').replace(' BTC', '')
            crossed = btc_listing.find('span', {'class': "crossed"})
            if crossed:
                btc_price = crossed.next_sibling.strip().replace(' BTC', '')
        else:
            btc_price = 0
        BTC.append(btc_price)

        usd_listing = listing.find('div', {"class": 'tips_price_1'})
        if usd_listing:
            usd_price = usd_listing.text.strip().replace('Open this exploit for ', '').replace(' GOLD', '')
            crossed = usd_listing.find('span', {'class': "crossed"})
            if crossed:
                usd_price = crossed.next_sibling.strip().replace(' GOLD', '')
            usd_price = usd_price.replace(' ', '')
        else:
            usd_price = 0
        USD.append(usd_price)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def zeroday_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listings = soup.findAll('div', {"class": "ExploitTableContent"})
    for listing in listings:
        # Adding the url to the list of urls
        description_link = listing.find('h3').find('a')['href']
        href.append(description_link)

    return href
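
The two parsers can be sanity-checked offline against pages the crawler has already saved. This is a hypothetical sketch: it assumes the repository root is on sys.path, and 'saved_listing.html' stands in for any listing page previously written by savePage() (both the name and location are made up here).

# Offline smoke test of the ZeroDay parsers on a previously saved listing page.
from bs4 import BeautifulSoup
from MarketPlaces.ZeroDay.parser import zeroday_listing_parser, zeroday_links_parser

with open('saved_listing.html', 'r', encoding='utf-8') as f:   # placeholder path
    soup = BeautifulSoup(f.read(), 'html.parser')

row = zeroday_listing_parser(soup)    # organized per-product rows, as handed to prepare_parser
links = zeroday_links_parser(soup)    # description URLs the crawler would visit next
print(len(links), 'description links found')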
