From 2731a8cabdcbe0ffad79d32f0684f43999202ada Mon Sep 17 00:00:00 2001
From: Nathan Pham
Date: Mon, 28 Aug 2023 00:25:41 -0700
Subject: [PATCH] add DarkBazar

---
 MarketPlaces/DarkBazar/crawler_selenium.py | 252 ++++++++++++++++++++
 MarketPlaces/DarkBazar/parser.py           | 264 +++++++++++++++++++++
 2 files changed, 516 insertions(+)
 create mode 100644 MarketPlaces/DarkBazar/crawler_selenium.py
 create mode 100644 MarketPlaces/DarkBazar/parser.py

diff --git a/MarketPlaces/DarkBazar/crawler_selenium.py b/MarketPlaces/DarkBazar/crawler_selenium.py
new file mode 100644
index 0000000..6d19a9f
--- /dev/null
+++ b/MarketPlaces/DarkBazar/crawler_selenium.py
@@ -0,0 +1,252 @@
+__author__ = 'Helium'
+
+'''
+DarkBazar Marketplace Crawler (Selenium)
+'''
+
+from selenium import webdriver
+from selenium.common.exceptions import NoSuchElementException
+from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
+from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
+from selenium.webdriver.firefox.service import Service
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+from PIL import Image
+import urllib.parse as urlparse
+import os, re, time
+from datetime import date
+import subprocess
+import configparser
+from bs4 import BeautifulSoup
+from MarketPlaces.Initialization.prepare_parser import new_parse
+from MarketPlaces.DarkBazar.parser import darkbazar_links_parser
+from MarketPlaces.Utilities.utilities import cleanHTML
+
+counter = 1
+baseURL = 'http://jw5e5sdywqupaqgtt43uq5ysfqpd2vzefl65s2fcjlj4qfxivynv6bqd.onion/log.php'
+
+
+def login(driver):
+    input("Press ENTER when the CAPTCHA is complete and the login page has loaded\n")
+
+    # Entering username and password into the input boxes
+    usernameBox = driver.find_element(by=By.XPATH, value='//input[@name="username"]')
+    # Username here
+    usernameBox.send_keys('aliciamykeys')
+    passwordBox = driver.find_element(by=By.XPATH, value='//input[@name="password"]')
+    # Password here
+    passwordBox.send_keys('aliciawherearemykey$')
+
+    input("Press ENTER when the CAPTCHA is completed and you have closed the newsletter\n")
+
+    # Wait for the listing page to show up (this XPath may need to change based on the seed URL)
+    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
+        (By.XPATH, '//*[@id="submit"]')))
+
+
+def crawlForum(driver):
+    print("Crawling the DarkBazar market")
+
+    linksToCrawl = getInterestedLinks()
+
+    i = 0
+    while i < len(linksToCrawl):
+        link = linksToCrawl[i]
+        print('Crawling :', link)
+        try:
+            has_next_page = True
+            count = 0
+
+            while has_next_page:
+                try:
+                    driver.get(link)
+                except:
+                    driver.refresh()
+                html = driver.page_source
+                savePage(html, link)
+
+                # Visit every product (description) page found on this listing page
+                # (renamed from 'list' to avoid shadowing the builtin)
+                productList = productPages(html)
+
+                for item in productList:
+                    itemURL = urlparse.urljoin(baseURL, str(item))
+                    try:
+                        driver.get(itemURL)
+                    except:
+                        driver.refresh()
+                    savePage(driver.page_source, item)
+                    driver.back()
+
+                    # comment out: debug toggle, stop after the first product
+                    break
+
+                # comment out: debug toggle, stop after two listing pages
+                if count == 1:
+                    break
+
+                try:
+                    link = driver.find_element(by=By.XPATH, value='//a[contains(text(), "Next")]').get_attribute('href')
+                    if link == "":
+                        raise NoSuchElementException
+                    count += 1
+
+                except NoSuchElementException:
+                    has_next_page = False
+
+        except Exception as e:
+            print(link, e)
+        i += 1
+
+    input("Crawling the DarkBazar market done successfully. Press ENTER to continue\n")
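+
+
+# Note: crawlForum's next-page lookup assumes pagination anchors whose text contains
+# "Next"; if DarkBazar renders pagination differently, the XPath must be adjusted.
+# The two "comment out" breaks are debug toggles for single-item / two-page test
+# runs and should be removed for a full crawl.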
+
+
+def savePage(page, url):
+    cleanPage = cleanHTML(page)
+    filePath = getFullPathName(url)
+    os.makedirs(os.path.dirname(filePath), exist_ok=True)
+    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
+    return
+
+
+def getInterestedLinks():
+    links = []
+
+    links.append('http://jw5e5sdywqupaqgtt43uq5ysfqpd2vzefl65s2fcjlj4qfxivynv6bqd.onion/cat.php?category=5')
+
+    return links
+
+
+def crawler():
+    startCrawling()
+
+
+def startCrawling():
+    opentor()
+    mktName = getMKTName()
+    driver = getAccess()
+
+    if driver != 'down':
+        try:
+            login(driver)
+            crawlForum(driver)
+        except Exception as e:
+            print(driver.current_url, e)
+        closetor(driver)
+
+    # new_parse(mktName, baseURL, False)
+
+
+# Opens Tor Browser
+def opentor():
+    from MarketPlaces.Initialization.markets_mining import config
+
+    global pid
+    print("Connecting Tor...")
+    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
+    pid = pro.pid
+    time.sleep(7.5)
+    input('Tor Connected. Press ENTER to continue\n')
+    return
+
+
+# Returns the name of the website
+def getMKTName():
+    name = 'DarkBazar'
+    return name
+
+
+# Returns the base link of the website
+def getFixedURL():
+    url = 'http://jw5e5sdywqupaqgtt43uq5ysfqpd2vzefl65s2fcjlj4qfxivynv6bqd.onion/log.php'
+    return url
+
+
+# Closes Tor Browser
+def closetor(driver):
+    # global pid
+    # os.system("taskkill /pid " + str(pro.pid))
+    # os.system("taskkill /t /f /im tor.exe")
+    print('Closing Tor...')
+    driver.close()
+    time.sleep(3)
+    return
+
+
+# Creates a FireFox 'driver' and configures its 'Profile'
+# to use the Tor proxy and socket
+def createFFDriver():
+    from MarketPlaces.Initialization.markets_mining import config
+
+    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
+
+    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
+    ff_prof.set_preference("places.history.enabled", False)
+    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
+    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
+    ff_prof.set_preference("signon.rememberSignons", False)
+    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
+    ff_prof.set_preference("network.dns.disablePrefetch", True)
+    ff_prof.set_preference("network.http.sendRefererHeader", 0)
+    ff_prof.set_preference("permissions.default.image", 2)
+    ff_prof.set_preference("browser.download.folderList", 2)
+    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
+    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
+    ff_prof.set_preference('network.proxy.type', 1)
+    ff_prof.set_preference("network.proxy.socks_version", 5)
+    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
+    ff_prof.set_preference('network.proxy.socks_port', 9150)
+    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
+    ff_prof.set_preference("javascript.enabled", False)
+    ff_prof.update_preferences()
+
+    service = Service(config.get('TOR', 'geckodriver_path'))
+
+    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
+
+    return driver
+
+
+# Extracts the product (description) links from a listing page's HTML
+def productPages(html):
+    soup = BeautifulSoup(html, "html.parser")
+    return darkbazar_links_parser(soup)
+
+
+# The driver 'gets' the URL, attempting to access the site; returns 'down' if it can't
+def getAccess():
+    url = getFixedURL()
+    driver = createFFDriver()
+    try:
+        driver.get(url)
+        return driver
+    except:
+        driver.close()
+        return 'down'
+
+
+def getFullPathName(url):
+    # This is a marketplace crawler, so the config comes from the MarketPlaces
+    # initialization module, not the Forums one (fixed copy-paste leftover)
+    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
+
+    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
+    fileName = getNameFromURL(url)
+    if isDescriptionLink(url):
+        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
+    else:
+        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
+    return fullPath
+
+
+# Returns 'True' if the link is a description (product) link; may need to change for every website
+def isDescriptionLink(url):
+    if 'item' in url:
+        return True
+    return False
+
+
+# Returns 'True' if the link is a listing page link; may need to change for every website
+def isListingLink(url):
+    if 'category=' in url:
+        return True
+    return False
+
+
+def getNameFromURL(url):
+    global counter
+    name = ''.join(e for e in url if e.isalnum())
+    if name == '':
+        name = str(counter)
+        counter = counter + 1
+    return name
+
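For reference, a minimal sketch (not part of the patch) of the on-disk layout that getFullPathName produces. The shared-folder root ./data and the date are hypothetical stand-ins for config values:

# Illustrative only -- mirrors getFullPathName's Listing/Description split.
import os

def expected_path(main_dir, current_date, is_description, file_name):
    # Description pages land under Description/, listing pages under Listing/
    sub = 'Description' if is_description else 'Listing'
    return os.path.join(main_dir, current_date, sub, file_name + '.html')

print(expected_path('./data/MarketPlaces/DarkBazar/HTML_Pages',
                    '2023-08-28', True, 'httpexampleonionitemphpid1'))
# ./data/MarketPlaces/DarkBazar/HTML_Pages/2023-08-28/Description/httpexampleonionitemphpid1.html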
diff --git a/MarketPlaces/DarkBazar/parser.py b/MarketPlaces/DarkBazar/parser.py
new file mode 100644
index 0000000..59993d1
--- /dev/null
+++ b/MarketPlaces/DarkBazar/parser.py
@@ -0,0 +1,264 @@
+# re is used for the CVE/MS regex searches below
+import re
+
+# Here, we are importing the auxiliary functions to clean or convert data
+from MarketPlaces.Utilities.utilities import *
+
+# Here, we are importing BeautifulSoup to search through the HTML tree
+from bs4 import BeautifulSoup
+
+
+# Parses description pages: takes the HTML of a description page as a soup object
+# and extracts the info it needs into variables, which are returned organized in a tuple
+# @param: soup object looking at the HTML page of a description page
+# return: 'row' that contains the fields scraped from the description page
+def darkbazar_description_parser(soup):
+    # Fields to be parsed
+
+    vendor = "-1"            # 0 *Vendor_Name
+    success = "-1"           # 1 Vendor_Successful_Transactions
+    rating_vendor = "-1"     # 2 Vendor_Rating
+    name = "-1"              # 3 *Product_Name
+    describe = "-1"          # 4 Product_Description
+    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
+    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
+    category = "-1"          # 7 Product_Category
+    views = "-1"             # 8 Product_Number_Of_Views
+    reviews = "-1"           # 9 Product_Number_Of_Reviews
+    rating_item = "-1"       # 10 Product_Rating
+    addDate = "-1"           # 11 Product_AddedDate
+    BTC = "-1"               # 12 Product_BTC_SellingPrice
+    USD = "-1"               # 13 Product_USD_SellingPrice
+    EURO = "-1"              # 14 Product_EURO_SellingPrice
+    sold = "-1"              # 15 Product_QuantitySold
+    left = "-1"              # 16 Product_QuantityLeft
+    shipFrom = "-1"          # 17 Product_ShippedFrom
+    shipTo = "-1"            # 18 Product_ShippedTo
+
+    # Finding the Product Name
+    divmb = soup.findAll('div', {'class': "mb-1"})
+
+    name = divmb[0].text
+    name = name.replace('\n', ' ')
+    name = name.replace(",", "")
+    name = name.strip()
+
+    # Finding the Vendor
+    vendor = divmb[1].find('a').text.strip()
+
+    # Finding the Vendor Rating
+    rating_vendor = soup.find('div', {'class': ""}).text
+    rating_vendor = rating_vendor.replace("Vendor's Review : ", "")
+    rating_vendor = rating_vendor.strip()
+
+    # Finding Successful Transactions
+    success = divmb[3].text
+    success = success.replace("Level:", "")
+    success = success.strip()
+
+    # Finding Prices
+    USD = soup.find('div', {'class': "h3 text-primary"}).text.strip()
+
+    # Finding Escrow
+    escrow = divmb[5].find('span', {'class': "badge badge-danger"}).text.strip()
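+
+    # Note: the positional divmb[...] indices above (and pmb[...] below) assume
+    # DarkBazar's current page template; if the layout changes, these offsets
+    # will silently pick up the wrong fields.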
+
+    # Finding the Product Category
+    pmb = soup.findAll('p', {'class': "mb-1"})
+
+    category = pmb[-1].text
+    category = category.replace("Category: ", "").strip()
+
+    # Finding the Product Quantity Available
+    left = divmb[-1].text
+    left = left.split(",", 1)[1]
+    left = left.replace("in stock", "")
+    left = left.strip()
+
+    # Finding the Number Sold
+    sold = divmb[-1].text
+    sold = sold.split(",", 1)[0]
+    sold = sold.replace("sold", "")
+    sold = sold.strip()
+
+    # Finding Shipment Information (Origin)
+    shipFrom = pmb[0].text
+    shipFrom = shipFrom.replace("Ships from: ", "").strip()
+
+    # Finding Shipment Information (Destination)
+    shipTo = pmb[1].text
+    shipTo = shipTo.replace("Ships to: ", "").strip()
+
+    # Finding the Product Description
+    cardbody = soup.findAll('div', {'class': "card-body"})
+    describe = cardbody[1].text.strip()
+
+    # Finding the Number of Product Reviews
+    reviews = soup.find('div', {'class': "product-rating"}).text
+    reviews = reviews.replace("(", "")
+    reviews = reviews.replace(" review)", "")
+    reviews = reviews.strip()
+
+    # Searching for CVE and MS categories
+    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
+    if cve:
+        CVE = " "
+        for idx in cve:
+            CVE += (idx)
+            CVE += " "
+        CVE = CVE.replace(',', ' ')
+        CVE = CVE.replace('\n', '')
+    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
+    if ms:
+        MS = " "
+        for im in ms:
+            MS += (im)
+            MS += " "
+        MS = MS.replace(',', ' ')
+        MS = MS.replace('\n', '')
+
+    # Populating the final variable (this should be a tuple with all fields scraped)
+    row = (name, describe, CVE, MS, reviews, category, shipFrom, shipTo, left, escrow, vendor,
+           sold, addDate, BTC, USD, rating_vendor, success, EURO)
+
+    # Sending the results
+    return row
+
+
+# Parses listing pages: takes the HTML of a listing page as a soup object and
+# extracts the info it needs into per-product lists, which are returned organized
+# @param: soup object looking at the HTML page of a listing page
+# return: 'row' that contains a variety of lists that each hold info on the listing page
+def darkbazar_listing_parser(soup):
+    # Fields to be parsed
+    nm = 0                      # *Total_Products (Should be Integer)
+    mktName = "DarkBazar"       # 0 Marketplace_Name
+    name = []                   # 1 Product_Name
+    CVE = []                    # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
+    MS = []                     # 3 Product_MS_Classification (Microsoft Security)
+    category = []               # 4 Product_Category
+    describe = []               # 5 Product_Description
+    escrow = []                 # 6 Vendor_Warranty
+    views = []                  # 7 Product_Number_Of_Views
+    reviews = []                # 8 Product_Number_Of_Reviews
+    addDate = []                # 9 Product_AddDate
+    lastSeen = []               # 10 Product_LastViewDate
+    BTC = []                    # 11 Product_BTC_SellingPrice
+    USD = []                    # 12 Product_USD_SellingPrice
+    EURO = []                   # 13 Product_EURO_SellingPrice
+    sold = []                   # 14 Product_QuantitySold
+    qLeft = []                  # 15 Product_QuantityLeft
+    shipFrom = []               # 16 Product_ShippedFrom
+    shipTo = []                 # 17 Product_ShippedTo
+    vendor = []                 # 18 Vendor
+    rating = []                 # 19 Vendor_Rating
+    success = []                # 20 Vendor_Successful_Transactions
+    href = []                   # 21 Product_Links (Urls)
+
+    listing = soup.findAll('div', {"id": "itembox"})
+
+    # Populating the Number of Products
+    nm = len(listing)
+
+    for a in listing:
+        bae = a.findAll('a', href=True)
+        lb = a.findAll('div', {"id": "littlebox"})
+
+        # Adding the url to the list of urls
+        link = bae[0].get('href')
+        link = cleanLink(link)
+        href.append(link)
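+
+        # Note: the lb[...] and span[...] indices below are positional within each
+        # "littlebox" div and depend on DarkBazar's current listing markup.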
+
+        # Finding the Product
+        product = lb[1].find('a').text
+        product = product.replace('\n', ' ')
+        product = product.replace(",", "")
+        product = product.replace("...", "")
+        product = product.strip()
+        name.append(product)
+
+        # Finding Prices
+        price = lb[-1].find('div', {"class": "mb-1"}).text
+        price = price.replace("$", "")
+        price = price.strip()
+        USD.append(price)
+
+        # Finding the Vendor
+        vendor_name = lb[-1].find("a").text
+        vendor_name = vendor_name.replace(",", "")
+        vendor_name = vendor_name.strip()
+        vendor.append(vendor_name)
+
+        # Finding the Category
+        cat = lb[-1].find("span").text
+        cat = cat.replace("class:", "")
+        cat = cat.strip()
+        category.append(cat)
+
+        # Finding the Number Sold and Quantity Left
+        span = lb[1].findAll("span")
+        num = span[-1].text
+        num = num.replace("Sold:", "")
+        num = num.strip()
+        sold.append(num)
+
+        quant = span[1].text
+        quant = quant.replace("stock:", "")
+        quant = quant.strip()
+        qLeft.append(quant)
+
+        # Searching for CVE and MS categories
+        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
+        if not cve:
+            cveValue = "-1"
+        else:
+            cee = " "
+            for idx in cve:
+                cee += (idx)
+                cee += " "
+            cee = cee.replace(',', ' ')
+            cee = cee.replace('\n', '')
+            cveValue = cee
+        CVE.append(cveValue)
+
+        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
+        if not ms:
+            MSValue = "-1"
+        else:
+            me = " "
+            for im in ms:
+                me += (im)
+                me += " "
+            me = me.replace(',', ' ')
+            me = me.replace('\n', '')
+            MSValue = me
+        MS.append(MSValue)
+
+    # Populate the final variable (this should be a list with all fields scraped)
+    return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
+                            BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
+
+
+# Called by the crawler to get description links on a listing page
+# @param: soup object looking at the HTML page of a listing page
+# return: list of description links from a listing page
+def darkbazar_links_parser(soup):
+    # Returning all links that should be visited by the Crawler
+
+    href = []
+    listing = soup.findAll('div', {"id": "itembox"})
+
+    # Alternative selector kept for reference:
+    # for a in listing:
+    #     bae = a.find('a', {"class": "text-info"}, href=True)
+    #     link = bae['href']
+    #     href.append(link)
+
+    for a in listing:
+        bae = a.findAll('a', href=True)
+
+        # Adding the url to the list of urls
+        link = bae[0].get('href')
+        link = cleanLink(link)
+        href.append(link)
+
+    return href
\ No newline at end of file
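
A quick way to sanity-check the link extraction without touching Tor is to feed darkbazar_links_parser a synthetic snippet. This is an illustrative sketch, not part of the patch; it assumes the package is importable and that cleanLink() leaves a plain relative URL unchanged, and the item.php URLs are made up:

# Illustrative only -- exercises the <div id="itembox"> structure the parser expects.
from bs4 import BeautifulSoup
from MarketPlaces.DarkBazar.parser import darkbazar_links_parser

SAMPLE = '''
<div id="itembox"><a href="item.php?id=123">first</a></div>
<div id="itembox"><a href="item.php?id=456">second</a></div>
'''

soup = BeautifulSoup(SAMPLE, "html.parser")
print(darkbazar_links_parser(soup))  # expected: ['item.php?id=123', 'item.php?id=456']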