From 12c5c356f011181621d57e8d949968a76c118f80 Mon Sep 17 00:00:00 2001
From: Nathan Pham
Date: Mon, 28 Aug 2023 21:42:09 -0700
Subject: [PATCH] add metaverse market and pabloescobar market

---
 .../MetaVerseMarket/crawler_selenium.py    | 306 ++++++++++++++++++
 MarketPlaces/MetaVerseMarket/parser.py     | 285 ++++++++++++++++
 .../PabloEscobarMarket/crawler_selenium.py | 268 +++++++++++++++
 MarketPlaces/PabloEscobarMarket/parser.py  | 288 +++++++++++++++++
 4 files changed, 1147 insertions(+)
 create mode 100644 MarketPlaces/MetaVerseMarket/crawler_selenium.py
 create mode 100644 MarketPlaces/MetaVerseMarket/parser.py
 create mode 100644 MarketPlaces/PabloEscobarMarket/crawler_selenium.py
 create mode 100644 MarketPlaces/PabloEscobarMarket/parser.py

diff --git a/MarketPlaces/MetaVerseMarket/crawler_selenium.py b/MarketPlaces/MetaVerseMarket/crawler_selenium.py
new file mode 100644
index 0000000..e624d51
--- /dev/null
+++ b/MarketPlaces/MetaVerseMarket/crawler_selenium.py
@@ -0,0 +1,306 @@
+__author__ = 'Helium'
+
+'''
+MetaVerseMarket Marketplace Crawler (Selenium)
+not complete
+need to go through multiple pages...
+'''
+
+from selenium import webdriver
+from selenium.common.exceptions import NoSuchElementException
+from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
+from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
+from selenium.webdriver.firefox.service import Service
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+from PIL import Image
+import urllib.parse as urlparse
+import os, re, time
+from datetime import date
+import subprocess
+import configparser
+from bs4 import BeautifulSoup
+from MarketPlaces.Initialization.prepare_parser import new_parse
+from MarketPlaces.MetaVerseMarket.parser import metaversemarket_links_parser
+from MarketPlaces.Utilities.utilities import cleanHTML
+
+counter = 1
+baseURL = 'http://mdbvvcfwl3fpckiraucv7gio57yoslnhfjxzpoihf4fgdkdd7bwyv7id.onion/login'
+
+
+# Opens Tor Browser, crawls the website, parses it, then closes Tor.
+# Acts as the main method for the crawler; crawler() at the bottom of this file calls it.
+def startCrawling():
+    # opentor()
+    mktName = getMKTName()
+    driver = getAccess()
+
+    if driver != 'down':
+        try:
+            login(driver)
+            crawlForum(driver)
+        except Exception as e:
+            print(driver.current_url, e)
+        closetor(driver)
+
+    new_parse(mktName, baseURL, True)
+
+
+# Opens Tor Browser and prompts for ENTER input before continuing
+def opentor():
+    from MarketPlaces.Initialization.markets_mining import config
+
+    global pid
+    print("Connecting Tor...")
+    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
+    pid = pro.pid
+    time.sleep(7.5)
+    input('Tor Connected. Press ENTER to continue\n')
+    return
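+
+# A minimal connectivity sketch (not wired in above): confirm the Tor SOCKS
+# port is actually accepting connections before crawling. Port 9150 matches
+# the proxy configured in createFFDriver below; stdlib only.
+# def isTorUp(host='127.0.0.1', port=9150, timeout=5):
+#     import socket
+#     try:
+#         with socket.create_connection((host, port), timeout=timeout):
+#             return True
+#     except OSError:
+#         return False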
+
+
+# Returns the name of the website
+#return: name of site in string type
+def getMKTName():
+    name = 'MetaVerseMarket'
+    return name
+
+
+# Returns the base link of the website
+#return: url of base site in string type
+def getFixedURL():
+    url = 'http://mdbvvcfwl3fpckiraucv7gio57yoslnhfjxzpoihf4fgdkdd7bwyv7id.onion/login'
+    return url
+
+
+# Closes Tor Browser
+#@param: current selenium driver
+def closetor(driver):
+    # global pid
+    # os.system("taskkill /pid " + str(pro.pid))
+    # os.system("taskkill /t /f /im tor.exe")
+    print('Closing Tor...')
+    driver.close()
+    time.sleep(3)
+    return
+
+
+# Creates the FireFox 'driver' and configures its 'Profile'
+# to use the Tor proxy and socket
+def createFFDriver():
+    from MarketPlaces.Initialization.markets_mining import config
+
+    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
+
+    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
+    ff_prof.set_preference("places.history.enabled", False)
+    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
+    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
+    ff_prof.set_preference("signon.rememberSignons", False)
+    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
+    ff_prof.set_preference("network.dns.disablePrefetch", True)
+    ff_prof.set_preference("network.http.sendRefererHeader", 0)
+    ff_prof.set_preference("permissions.default.image", 1)  # 1 = load images
+    ff_prof.set_preference("browser.download.folderList", 2)
+    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
+    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
+    ff_prof.set_preference('network.proxy.type', 1)
+    ff_prof.set_preference("network.proxy.socks_version", 5)
+    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
+    ff_prof.set_preference('network.proxy.socks_port', 9150)
+    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
+    ff_prof.set_preference("javascript.enabled", False)
+    ff_prof.update_preferences()
+
+    service = Service(config.get('TOR', 'geckodriver_path'))
+
+    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
+
+    driver.maximize_window()
+
+    return driver
+
+
+# The driver 'gets' the url and attempts to access the site; returns 'down' if it can't
+#return: the selenium driver, or the string 'down'
+def getAccess():
+    url = getFixedURL()
+    driver = createFFDriver()
+    try:
+        driver.get(url)
+        return driver
+    except:
+        driver.close()
+        return 'down'
+
+
+# Manual captcha solver: waits for a specific element so that the whole page loads,
+# fills in the login boxes, then allows for manual solving of the captcha in the browser
+#@param: current selenium web driver
+def login(driver):
+
+    # entering username and password into input boxes
+    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
+    # Username here
+    usernameBox.send_keys('metotomoto')
+    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
+    # Password here
+    passwordBox.send_keys('lionking_kumba1ya')
+
+    input("Press ENTER when CAPTCHA is completed and you exit the newsletter\n")
+
+    # wait for the listing page to show up (this XPath may need to change based on the seed url)
+    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
+        (By.XPATH, '//*[@id="searchq"]')))
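+
+# Illustrative sketch only (not used above): capturing the captcha image to a
+# file for manual solving; the locator here is hypothetical.
+# captchaImg = driver.find_element(by=By.XPATH, value='//img[contains(@src, "captcha")]')
+# captchaImg.screenshot('captcha.png')  # Selenium's element-level screenshot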
'//*[@id="searchq"]'))) + +# Saves the crawled html page, makes the directory path for html pages if not made +def savePage(driver, page, url): + cleanPage = cleanHTML(driver, page) + filePath = getFullPathName(url) + os.makedirs(os.path.dirname(filePath), exist_ok=True) + open(filePath, 'wb').write(cleanPage.encode('utf-8')) + return + + +# Gets the full path of the page to be saved along with its appropriate file name +#@param: raw url as crawler crawls through every site +def getFullPathName(url): + from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE + + mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages") + fileName = getNameFromURL(url) + if isDescriptionLink(url): + fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html') + else: + fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html') + return fullPath + + +# Creates the file name from passed URL, gives distinct name if can't be made unique after cleaned +#@param: raw url as crawler crawls through every site +def getNameFromURL(url): + global counter + name = ''.join(e for e in url if e.isalnum()) + if (name == ''): + name = str(counter) + counter = counter + 1 + return name + + +# returns list of urls, here is where you can list the different urls of interest, the crawler runs through this list +#in this example, there are a couple of categories some threads fall under such as +# Guides and Tutorials, Digital Products, and Software and Malware +#as you can see they are categories of products +def getInterestedLinks(): + links = [] + + # hacking + links.append('http://mdbvvcfwl3fpckiraucv7gio57yoslnhfjxzpoihf4fgdkdd7bwyv7id.onion/products/hacking') + # hosting + links.append('http://mdbvvcfwl3fpckiraucv7gio57yoslnhfjxzpoihf4fgdkdd7bwyv7id.onion/products/hosting') + # hacking guides and tutorials + links.append('http://mdbvvcfwl3fpckiraucv7gio57yoslnhfjxzpoihf4fgdkdd7bwyv7id.onion/products/hacking-guides-and-tutorials') + + return links + + +# gets links of interest to crawl through, iterates through list, where each link is clicked and crawled through +#topic and description pages are crawled through here, where both types of pages are saved +#@param: selenium driver +def crawlForum(driver): + print("Crawling the MetaVerse market") + + linksToCrawl = getInterestedLinks() + + i = 0 + while i < len(linksToCrawl): + link = linksToCrawl[i] + print('Crawling :', link) + try: + has_next_page = True + count = 0 + + while has_next_page: + try: + driver.get(link) + except: + driver.refresh() + html = driver.page_source + savePage(driver, html, link) + + list = productPages(html) + for item in list: + itemURL = urlparse.urljoin(baseURL, str(item)) + try: + driver.get(itemURL) + except: + driver.refresh() + savePage(driver, driver.page_source, item) + driver.back() + + # comment out + break + + # comment out + if count == 1: + break + + try: + link = driver.find_element(by=By.XPATH, value='//a[@class="page-link-next"]').get_attribute('href') + if link == "": + raise NoSuchElementException + count += 1 + + except NoSuchElementException: + has_next_page = False + + except Exception as e: + print(link, e) + i += 1 + + print("Crawling the MetaVerse market done.") + + +# Returns 'True' if the link is a description link +#@param: url of any url crawled +#return: true if is a description page, false if not +def isDescriptionLink(url): + if 'PR' in url: + return True + return False + + +# Returns True if the 
+
+
+# Returns True if the link is a listing page link
+#@param: url of any url crawled
+#return: true if it is a Listing page, false if not
+def isListingLink(url):
+    if 'products' in url:
+        return True
+    return False
+
+
+# Calls the parser on a listing page; html is the page source of a link from
+# the list of interested links, i.e. getInterestedLinks()
+#return: list of description links that should be crawled through
+def productPages(html):
+    soup = BeautifulSoup(html, "html.parser")
+    return metaversemarket_links_parser(soup)
+
+
+# Drop links that "signout"
+# def isSignOut(url):
+#     #absURL = urlparse.urljoin(url.base_url, url.url)
+#     if 'signout' in url.lower() or 'logout' in url.lower():
+#         return True
+#
+#     return False
+
+
+def crawler():
+    startCrawling()
+    # print("Crawling and Parsing MetaVerseMarket .... DONE!")
diff --git a/MarketPlaces/MetaVerseMarket/parser.py b/MarketPlaces/MetaVerseMarket/parser.py
new file mode 100644
index 0000000..8c83293
--- /dev/null
+++ b/MarketPlaces/MetaVerseMarket/parser.py
@@ -0,0 +1,285 @@
+__author__ = 'DarkWeb'
+
+# Here, we are importing the auxiliary functions to clean or convert data
+from MarketPlaces.Utilities.utilities import *
+
+# Here, we are importing BeautifulSoup to search through the HTML tree
+from bs4 import BeautifulSoup
+
+
+# Parses description pages: takes the html page of a description page as a soup object
+# and parses it for the info it needs, storing it in fields that are returned together
+# @param: soup object looking at html page of description page
+# return: 'row' that contains a variety of fields holding info from the description page
+def darkfox_description_parser(soup):
+    # Fields to be parsed
+
+    name = "-1"      # 0 Product_Name
+    describe = "-1"  # 1 Product_Description
+    lastSeen = "-1"  # 2 Product_LastViewDate
+    CVE = "-1"       # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures)
+    MS = "-1"        # 5 Product_MS_Classification (Microsoft Security)
+    review = "-1"    # 6 Product_Number_Of_Reviews
+    category = "-1"  # 7 Product_Category
+    shipFrom = "-1"  # 8 Product_ShippedFrom
+    shipTo = "-1"    # 9 Product_ShippedTo
+    left = "-1"      # 10 Product_QuantityLeft
+    escrow = "-1"    # 11 Vendor_Warranty
+    terms = "-1"     # 12 Vendor_TermsAndConditions
+    vendor = "-1"    # 13 Vendor_Name
+    sold = "-1"      # 14 Product_QuantitySold
+    addDate = "-1"   # 15 Product_AddedDate
+    BTC = "-1"       # 18 Product_BTC_SellingPrice
+    USD = "-1"       # 19 Product_USD_SellingPrice
+    rating = "-1"    # 20 Vendor_Rating
+    success = "-1"   # 21 Vendor_Successful_Transactions
+    EURO = "-1"      # 22 Product_EURO_SellingPrice
+
+    # Finding Product Name
+    name = soup.find('h1').text
+    name = name.replace('\n', ' ')
+    name = name.replace(",", "")
+    name = name.strip()
+
+    # Finding Vendor
+    vendor = soup.find('h3').find('a').text.strip()
+
+    # Finding Vendor Rating
+    rating = soup.find('span', {'class': "tag is-dark"}).text.strip()
+
+    # Finding Successful Transactions
+    success = soup.find('h3').text
+    success = success.replace("Vendor: ", "")
+    success = success.replace(vendor, "")
+    success = success.replace("(", "")
+    success = success.replace(")", "")
+    success = success.strip()
+
+    bae = soup.find('div', {'class': "box"}).find_all('ul')
+
+    # Finding Prices
+    USD = bae[1].find('strong').text.strip()
+
+    li = bae[2].find_all('li')
+
+    # Finding Escrow
+    escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip()
+
+    # Finding the Product Category
+    category = li[1].find('span', {'class': "tag is-dark"}).text.strip()
is-dark"}).text.strip() + + # Finding the Product Quantity Available + left = li[3].find('span', {'class': "tag is-dark"}).text.strip() + + # Finding Number Sold + sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() + + li = bae[3].find_all('li') + + # Finding Shipment Information (Origin) + if "Ships from:" in li[-2].text: + shipFrom = li[-2].text + shipFrom = shipFrom.replace("Ships from: ", "") + # shipFrom = shipFrom.replace(",", "") + shipFrom = shipFrom.strip() + + # Finding Shipment Information (Destination) + shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text + shipTo = shipTo.replace("Ships to: ", "") + shipTo = shipTo.strip() + if "certain countries" in shipTo: + countries = "" + tags = li[-1].find_all('span', {'class': "tag"}) + for tag in tags: + country = tag.text.strip() + countries += country + ", " + shipTo = countries.strip(", ") + + # Finding the Product description + describe = soup.find('div', {'class': "pre-line"}).text + describe = describe.replace("\n", " ") + describe = describe.strip() + + '''# Finding the Number of Product Reviews + tag = soup.findAll(text=re.compile('Reviews')) + for index in tag: + reviews = index + par = reviews.find('(') + if par >=0: + reviews = reviews.replace("Reviews (","") + reviews = reviews.replace(")","") + reviews = reviews.split(",") + review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) + else : + review = "-1"''' + + # Searching for CVE and MS categories + cve = soup.findAll(text=re.compile('CVE-\d{4}-\d{4}')) + if cve: + CVE = " " + for idx in cve: + CVE += (idx) + CVE += " " + CVE = CVE.replace(',', ' ') + CVE = CVE.replace('\n', '') + ms = soup.findAll(text=re.compile('MS\d{2}-\d{3}')) + if ms: + MS = " " + for im in ms: + MS += (im) + MS += " " + MS = MS.replace(',', ' ') + MS = MS.replace('\n', '') + + # Populating the final variable (this should be a list with all fields scraped) + row = (name, describe, lastSeen, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, + sold, addDate, BTC, USD, rating, success, EURO) + + # Sending the results + return row + + +# parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs +# stores info it needs in different lists, these lists are returned after being organized +# @param: soup object looking at html page of listing page +# return: 'row' that contains a variety of lists that each hold info on the listing page +def darkfox_listing_parser(soup): + # Fields to be parsed + nm = 0 # Total_Products (Should be Integer) + mktName = "DarkFox" # 0 Marketplace_Name + name = [] # 1 Product_Name + CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures) + MS = [] # 3 Product_MS_Classification (Microsoft Security) + category = [] # 4 Product_Category + describe = [] # 5 Product_Description + escrow = [] # 6 Vendor_Warranty + views = [] # 7 Product_Number_Of_Views + reviews = [] # 8 Product_Number_Of_Reviews + addDate = [] # 9 Product_AddDate + lastSeen = [] # 10 Product_LastViewDate + BTC = [] # 11 Product_BTC_SellingPrice + USD = [] # 12 Product_USD_SellingPrice + EURO = [] # 13 Product_EURO_SellingPrice + sold = [] # 14 Product_QuantitySold + qLeft = [] # 15 Product_QuantityLeft + shipFrom = [] # 16 Product_ShippedFrom + shipTo = [] # 17 Product_ShippedTo + vendor = [] # 18 Vendor + rating = [] # 19 Vendor_Rating + success = [] # 20 Vendor_Successful_Transactions + href = [] # 23 Product_Links (Urls) + + listing = soup.findAll('div', {"class": "card"}) + 
+
+
+# Parses listing pages: takes the html page of a listing page as a soup object
+# and parses it for the info it needs, storing it in per-product lists that are
+# organized and returned together
+# @param: soup object looking at html page of listing page
+# return: 'row' that contains a variety of lists that each hold info on the listing page
+def darkfox_listing_parser(soup):
+    # Fields to be parsed
+    nm = 0                # Total_Products (Should be Integer)
+    mktName = "DarkFox"   # 0 Marketplace_Name
+    name = []             # 1 Product_Name
+    CVE = []              # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
+    MS = []               # 3 Product_MS_Classification (Microsoft Security)
+    category = []         # 4 Product_Category
+    describe = []         # 5 Product_Description
+    escrow = []           # 6 Vendor_Warranty
+    views = []            # 7 Product_Number_Of_Views
+    reviews = []          # 8 Product_Number_Of_Reviews
+    addDate = []          # 9 Product_AddDate
+    lastSeen = []         # 10 Product_LastViewDate
+    BTC = []              # 11 Product_BTC_SellingPrice
+    USD = []              # 12 Product_USD_SellingPrice
+    EURO = []             # 13 Product_EURO_SellingPrice
+    sold = []             # 14 Product_QuantitySold
+    qLeft = []            # 15 Product_QuantityLeft
+    shipFrom = []         # 16 Product_ShippedFrom
+    shipTo = []           # 17 Product_ShippedTo
+    vendor = []           # 18 Vendor
+    rating = []           # 19 Vendor_Rating
+    success = []          # 20 Vendor_Successful_Transactions
+    href = []             # 23 Product_Links (Urls)
+
+    listing = soup.findAll('div', {"class": "card"})
+
+    # Populating the Number of Products
+    nm = len(listing)
+
+    for a in listing:
+        bae = a.findAll('a', href=True)
+
+        # Adding the url to the list of urls
+        link = bae[0].get('href')
+        link = cleanLink(link)
+        href.append(link)
+
+        # Finding the Product
+        product = bae[1].find('p').text
+        product = product.replace('\n', ' ')
+        product = product.replace(",", "")
+        product = product.replace("...", "")
+        product = product.strip()
+        name.append(product)
+
+        bae = a.find('div', {'class': "media-content"}).find('div').find_all('div')
+
+        if len(bae) >= 5:
+            # Finding Prices
+            price = bae[0].text
+            ud = price.replace(" USD", " ")
+            # u = ud.replace("$", "")
+            u = ud.replace(",", "")
+            u = u.strip()
+            USD.append(u)
+            # bc = (prc[1]).strip(' BTC')
+            # BTC.append(bc)
+
+            # Finding the Vendor
+            vendor_name = bae[1].find('a').text
+            vendor_name = vendor_name.replace(",", "")
+            vendor_name = vendor_name.strip()
+            vendor.append(vendor_name)
+
+            # Finding the Category
+            cat = bae[2].find('small').text
+            cat = cat.replace("Category: ", "")
+            cat = cat.replace(",", "")
+            cat = cat.strip()
+            category.append(cat)
+
+            # Finding Number Sold and Quantity Left
+            num = bae[3].text
+            num = num.replace("Sold: ", "")
+            num = num.strip()
+            sold.append(num)
+
+            quant = bae[4].find('small').text
+            quant = quant.replace("In stock: ", "")
+            quant = quant.strip()
+            qLeft.append(quant)
+
+            # Finding Successful Transactions
+            freq = bae[1].text
+            freq = freq.replace(vendor_name, "")
+            freq = re.sub(r'Vendor Level \d+', "", freq)
+            freq = freq.replace("(", "")
+            freq = freq.replace(")", "")
+            freq = freq.strip()
+            success.append(freq)
+
+        # Searching for CVE and MS categories
+        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
+        if not cve:
+            cveValue = "-1"
+        else:
+            cee = " "
+            for idx in cve:
+                cee += (idx)
+                cee += " "
+            cee = cee.replace(',', ' ')
+            cee = cee.replace('\n', '')
+            cveValue = cee
+        CVE.append(cveValue)
+
+        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
+        if not ms:
+            MSValue = "-1"
+        else:
+            me = " "
+            for im in ms:
+                me += (im)
+                me += " "
+            me = me.replace(',', ' ')
+            me = me.replace('\n', '')
+            MSValue = me
+        MS.append(MSValue)
+
+    # Populate the final variable (this should be a list with all fields scraped)
+    return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
+                            BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
+
+
+# Called by the crawler to get description links on a listing page
+# @param: beautifulsoup object that is using the correct html page (listing page)
+# return: list of description links from a listing page
+def metaversemarket_links_parser(soup):
+    # Returning all links that should be visited by the Crawler
+
+    href = []
+    listing = soup.findAll('div', {"class": "col-12 p-0"})
+
+    for a in listing:
+        bae = a.find('a', href=True)
+        link = bae['href']
+        href.append(link)
+
+    return href
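+
+# Usage sketch: productPages() in crawler_selenium.py builds the soup and
+# hands it here, roughly:
+#   soup = BeautifulSoup(html, "html.parser")
+#   links = metaversemarket_links_parser(soup)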
\ No newline at end of file
diff --git a/MarketPlaces/PabloEscobarMarket/crawler_selenium.py b/MarketPlaces/PabloEscobarMarket/crawler_selenium.py
new file mode 100644
index 0000000..cc9b890
--- /dev/null
+++ b/MarketPlaces/PabloEscobarMarket/crawler_selenium.py
@@ -0,0 +1,268 @@
+__author__ = 'DarkWeb'
+
+'''
+PabloEscobarMarket Marketplace Crawler (Selenium)
+'''
+
+from selenium import webdriver
+from selenium.common.exceptions import NoSuchElementException
+from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
+from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
+from selenium.webdriver.firefox.service import Service
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait
+
+from PIL import Image
+import urllib.parse as urlparse
+import os, re, time
+import subprocess
+from bs4 import BeautifulSoup
+from MarketPlaces.Initialization.prepare_parser import new_parse
+from MarketPlaces.PabloEscobarMarket.parser import pabloescobarmarket_links_parser
+from MarketPlaces.Utilities.utilities import cleanHTML
+
+counter = 1
+baseURL = 'http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/'
+
+
+# Opens Tor Browser, crawls the website
+def startCrawling():
+    # opentor()
+    mktName = getMKTName()
+    driver = getAccess()
+
+    if driver != 'down':
+        try:
+            login(driver)
+            crawlForum(driver)
+        except Exception as e:
+            print(driver.current_url, e)
+        closetor(driver)
+
+    new_parse(mktName, baseURL, True)
+
+
+# Opens Tor Browser
+def opentor():
+    from MarketPlaces.Initialization.markets_mining import config
+
+    global pid
+    print("Connecting Tor...")
+    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
+    pid = pro.pid
+    time.sleep(7.5)
+    input('Tor Connected. Press ENTER to continue\n')
+    return
+
+
+# Login using premade account credentials; the login captcha is solved manually
+def login(driver):
+    input("Press ENTER when CAPTCHA is complete and login page has loaded\n")
+
+    # entering username and password into input boxes
+    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
+    # Username here
+    usernameBox.send_keys('snorlaxrights')
+    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="inputPassword3"]')
+    # Password here
+    passwordBox.send_keys('$noringAllday')
+
+    input("Press ENTER when CAPTCHA is completed\n")
+
+    # wait for the listing page to show up (this XPath may need to change based on the seed url):
+    # wait up to 50 sec until the element with id "collapse3" is visible, then continue
+    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
+        (By.XPATH, '//*[@id="collapse3"]')))
+
+
+# Returns the name of the website
+def getMKTName() -> str:
+    name = 'PabloEscobarMarket'
+    return name
+
+
+# Returns the link of the website
+def getFixedURL():
+    url = 'http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/'
+    return url
+
+
+# Closes Tor Browser
+def closetor(driver):
+    # global pid
+    # os.system("taskkill /pid " + str(pro.pid))
+    # os.system("taskkill /t /f /im tor.exe")
+    print('Closing Tor...')
+    driver.close()  # close tab
+    time.sleep(3)
+    return
+
+
+# Creates the FireFox 'driver' and configures its 'Profile'
+# to use the Tor proxy and socket
+def createFFDriver():
+    from MarketPlaces.Initialization.markets_mining import config
+
+    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
+
+    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
+    ff_prof.set_preference("places.history.enabled", False)
+    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
+    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
+    ff_prof.set_preference("signon.rememberSignons", False)
+    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
+    ff_prof.set_preference("network.dns.disablePrefetch", True)
+    ff_prof.set_preference("network.http.sendRefererHeader", 0)
+    ff_prof.set_preference("permissions.default.image", 3)  # 3 = block third-party images
+    ff_prof.set_preference("browser.download.folderList", 2)
+    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
+    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
+    ff_prof.set_preference('network.proxy.type', 1)
+    ff_prof.set_preference("network.proxy.socks_version", 5)
+    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
+    ff_prof.set_preference('network.proxy.socks_port', 9150)
+    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
+    ff_prof.set_preference("javascript.enabled", True)
+    ff_prof.update_preferences()
+
+    service = Service(config.get('TOR', 'geckodriver_path'))
+
+    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
+
+    return driver
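+
+# Optional hardening sketch (not wired in): onion services time out often, so
+# access could be retried a few times before declaring the site down.
+# def getAccessWithRetry(retries=3):
+#     driver = createFFDriver()
+#     for _ in range(retries):
+#         try:
+#             driver.get(getFixedURL())
+#             return driver
+#         except Exception:
+#             time.sleep(5)
+#     driver.close()
+#     return 'down'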
+ ff_prof.set_preference("browser.download.folderList", 2) + ff_prof.set_preference("browser.download.manager.showWhenStarting", False) + ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") + ff_prof.set_preference('network.proxy.type', 1) + ff_prof.set_preference("network.proxy.socks_version", 5) + ff_prof.set_preference('network.proxy.socks', '127.0.0.1') + ff_prof.set_preference('network.proxy.socks_port', 9150) + ff_prof.set_preference('network.proxy.socks_remote_dns', True) + ff_prof.set_preference("javascript.enabled", True) + ff_prof.update_preferences() + + service = Service(config.get('TOR', 'geckodriver_path')) + + driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) + + return driver + + +def getAccess(): + url = getFixedURL() + driver = createFFDriver() + try: + driver.get(url) + return driver + except: + driver.close() + return 'down' + + +# Saves the crawled html page +def savePage(driver, page, url): + cleanPage = cleanHTML(driver, page) + filePath = getFullPathName(url) + os.makedirs(os.path.dirname(filePath), exist_ok=True) + open(filePath, 'wb').write(cleanPage.encode('utf-8')) + return + + +# Gets the full path of the page to be saved along with its appropriate file name +def getFullPathName(url): + from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE + + mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages") + fileName = getNameFromURL(url) + if isDescriptionLink(url): + fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html') + else: + fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html') + return fullPath + + +# Creates the file name from passed URL +def getNameFromURL(url): + global counter + name = ''.join(e for e in url if e.isalnum()) + if name == '': + name = str(counter) + counter = counter + 1 + return name + +# FIX +def getInterestedLinks(): + links = [] + + # hire hacker + links.append('http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/?sub_id=36') + # hacker + links.append('http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/?sub_id=34') + + return links + + +def crawlForum(driver): + print("Crawling the PabloEscobarMarket market") + + linksToCrawl = getInterestedLinks() + + i = 0 + while i < len(linksToCrawl): + link = linksToCrawl[i] + print('Crawling :', link) + try: + has_next_page = True + count = 0 + + while has_next_page: + try: + driver.get(link) + except: + driver.refresh() + html = driver.page_source + savePage(driver, html, link) + + list = productPages(html) + for item in list: + itemURL = urlparse.urljoin(baseURL, str(item)) + try: + driver.get(itemURL) + except: + driver.refresh() + savePage(driver, driver.page_source, item) + driver.back() + + # comment out + break + + # comment out + if count == 1: + break + + try: + link = driver.find_element(by=By.XPATH, value='//a[@rel="next"]').get_attribute('href') + if link == "": + raise NoSuchElementException + count += 1 + + except NoSuchElementException: + has_next_page = False + + except Exception as e: + print(link, e) + i += 1 + + print("Crawling the PabloEscobarMarket market done.") + + +# Returns 'True' if the link is Topic link, may need to change for every website +def isDescriptionLink(url): + if 'single_product' in url: + return True + return False + + +# Returns True if the link is a listingPage link, may need to change for every website +def 
+def isListingLink(url):
+    if 'sub_id' in url:
+        return True
+    return False
+
+
+# Calls the parser to extract the description links from a listing page's html
+def productPages(html):
+    soup = BeautifulSoup(html, "html.parser")
+    return pabloescobarmarket_links_parser(soup)
+
+
+def crawler():
+    startCrawling()
+    # print("Crawling and Parsing PabloEscobarMarket .... DONE!")
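+
+# Usage sketch: this module is normally driven by the marketplace mining entry
+# point, but it can be exercised directly, e.g.:
+# if __name__ == '__main__':
+#     crawler()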
+ BTC = "-1" # 18 Product_BTC_SellingPrice + USD = "-1" # 19 Product_USD_SellingPrice + rating = "-1" # 20 Vendor_Rating + success = "-1" # 21 Vendor_Successful_Transactions + EURO = "-1" # 22 Product_EURO_SellingPrice + + # Finding Product Name + name = soup.find('h1').text + name = name.replace('\n', ' ') + name = name.replace(",", "") + name = name.strip() + + # Finding Vendor + vendor = soup.find('h3').find('a').text.strip() + + # Finding Vendor Rating + rating = soup.find('span', {'class': "tag is-dark"}).text.strip() + + # Finding Successful Transactions + success = soup.find('h3').text + success = success.replace("Vendor: ", "") + success = success.replace(vendor, "") + success = success.replace("(", "") + success = success.replace(")", "") + success = success.strip() + + bae = soup.find('div', {'class': "box"}).find_all('ul') + + # Finding Prices + USD = bae[1].find('strong').text.strip() + + li = bae[2].find_all('li') + + # Finding Escrow + escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip() + + # Finding the Product Category + category = li[1].find('span', {'class': "tag is-dark"}).text.strip() + + # Finding the Product Quantity Available + left = li[3].find('span', {'class': "tag is-dark"}).text.strip() + + # Finding Number Sold + sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() + + li = bae[3].find_all('li') + + # Finding Shipment Information (Origin) + if "Ships from:" in li[-2].text: + shipFrom = li[-2].text + shipFrom = shipFrom.replace("Ships from: ", "") + # shipFrom = shipFrom.replace(",", "") + shipFrom = shipFrom.strip() + + # Finding Shipment Information (Destination) + shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text + shipTo = shipTo.replace("Ships to: ", "") + shipTo = shipTo.strip() + if "certain countries" in shipTo: + countries = "" + tags = li[-1].find_all('span', {'class': "tag"}) + for tag in tags: + country = tag.text.strip() + countries += country + ", " + shipTo = countries.strip(", ") + + # Finding the Product description + describe = soup.find('div', {'class': "pre-line"}).text + describe = describe.replace("\n", " ") + describe = describe.strip() + + '''# Finding the Number of Product Reviews + tag = soup.findAll(text=re.compile('Reviews')) + for index in tag: + reviews = index + par = reviews.find('(') + if par >=0: + reviews = reviews.replace("Reviews (","") + reviews = reviews.replace(")","") + reviews = reviews.split(",") + review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) + else : + review = "-1"''' + + # Searching for CVE and MS categories + cve = soup.findAll(text=re.compile('CVE-\d{4}-\d{4}')) + if cve: + CVE = " " + for idx in cve: + CVE += (idx) + CVE += " " + CVE = CVE.replace(',', ' ') + CVE = CVE.replace('\n', '') + ms = soup.findAll(text=re.compile('MS\d{2}-\d{3}')) + if ms: + MS = " " + for im in ms: + MS += (im) + MS += " " + MS = MS.replace(',', ' ') + MS = MS.replace('\n', '') + + # Populating the final variable (this should be a list with all fields scraped) + row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, + sold, addDate, available, endDate, BTC, USD, rating, success, EURO) + + # Sending the results + return row + + +# parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs +# stores info it needs in different lists, these lists are returned after being organized +# @param: soup object looking at html page of listing page +# return: 'row' that 
+
+
+# Parses listing pages: takes the html page of a listing page as a soup object
+# and parses it for the info it needs, storing it in per-product lists that are
+# organized and returned together
+# @param: soup object looking at html page of listing page
+# return: 'row' that contains a variety of lists that each hold info on the listing page
+def darkfox_listing_parser(soup):
+    # Fields to be parsed
+    nm = 0                # Total_Products (Should be Integer)
+    mktName = "DarkFox"   # 0 Marketplace_Name
+    name = []             # 1 Product_Name
+    CVE = []              # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
+    MS = []               # 3 Product_MS_Classification (Microsoft Security)
+    category = []         # 4 Product_Category
+    describe = []         # 5 Product_Description
+    escrow = []           # 6 Vendor_Warranty
+    views = []            # 7 Product_Number_Of_Views
+    reviews = []          # 8 Product_Number_Of_Reviews
+    addDate = []          # 9 Product_AddDate
+    lastSeen = []         # 10 Product_LastViewDate
+    BTC = []              # 11 Product_BTC_SellingPrice
+    USD = []              # 12 Product_USD_SellingPrice
+    EURO = []             # 13 Product_EURO_SellingPrice
+    sold = []             # 14 Product_QuantitySold
+    qLeft = []            # 15 Product_QuantityLeft
+    shipFrom = []         # 16 Product_ShippedFrom
+    shipTo = []           # 17 Product_ShippedTo
+    vendor = []           # 18 Vendor
+    rating = []           # 19 Vendor_Rating
+    success = []          # 20 Vendor_Successful_Transactions
+    href = []             # 23 Product_Links (Urls)
+
+    listing = soup.findAll('div', {"class": "card"})
+
+    # Populating the Number of Products
+    nm = len(listing)
+
+    for a in listing:
+        bae = a.findAll('a', href=True)
+
+        # Adding the url to the list of urls
+        link = bae[0].get('href')
+        link = cleanLink(link)
+        href.append(link)
+
+        # Finding the Product
+        product = bae[1].find('p').text
+        product = product.replace('\n', ' ')
+        product = product.replace(",", "")
+        product = product.replace("...", "")
+        product = product.strip()
+        name.append(product)
+
+        bae = a.find('div', {'class': "media-content"}).find('div').find_all('div')
+
+        if len(bae) >= 5:
+            # Finding Prices
+            price = bae[0].text
+            ud = price.replace(" USD", " ")
+            # u = ud.replace("$", "")
+            u = ud.replace(",", "")
+            u = u.strip()
+            USD.append(u)
+            # bc = (prc[1]).strip(' BTC')
+            # BTC.append(bc)
+
+            # Finding the Vendor
+            vendor_name = bae[1].find('a').text
+            vendor_name = vendor_name.replace(",", "")
+            vendor_name = vendor_name.strip()
+            vendor.append(vendor_name)
+
+            # Finding the Category
+            cat = bae[2].find('small').text
+            cat = cat.replace("Category: ", "")
+            cat = cat.replace(",", "")
+            cat = cat.strip()
+            category.append(cat)
+
+            # Finding Number Sold and Quantity Left
+            num = bae[3].text
+            num = num.replace("Sold: ", "")
+            num = num.strip()
+            sold.append(num)
+
+            quant = bae[4].find('small').text
+            quant = quant.replace("In stock: ", "")
+            quant = quant.strip()
+            qLeft.append(quant)
+
+            # Finding Successful Transactions
+            freq = bae[1].text
+            freq = freq.replace(vendor_name, "")
+            freq = re.sub(r'Vendor Level \d+', "", freq)
+            freq = freq.replace("(", "")
+            freq = freq.replace(")", "")
+            freq = freq.strip()
+            success.append(freq)
+
+        # Searching for CVE and MS categories
+        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
+        if not cve:
+            cveValue = "-1"
+        else:
+            cee = " "
+            for idx in cve:
+                cee += (idx)
+                cee += " "
+            cee = cee.replace(',', ' ')
+            cee = cee.replace('\n', '')
+            cveValue = cee
+        CVE.append(cveValue)
+
+        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
+        if not ms:
+            MSValue = "-1"
+        else:
+            me = " "
+            for im in ms:
+                me += (im)
+                me += " "
+            me = me.replace(',', ' ')
+            me = me.replace('\n', '')
+            MSValue = me
+        MS.append(MSValue)
+
+    # Populate the final variable (this should be a list with all fields scraped)
+    return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
+                            BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
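+
+# Note: organizeProducts (from Utilities) pairs these per-product lists into the
+# rows new_parse consumes, so every list appended to above must stay the same
+# length as nm.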
+
+
+# Called by the crawler to get description links on a listing page
+# @param: beautifulsoup object that is using the correct html page (listing page)
+# return: list of description links from a listing page
+# NOTE: the "col-12 p-0" selector is carried over from the MetaVerse parser; if
+# PabloEscobar's listing markup differs, this locator is the first thing to revisit.
+def pabloescobarmarket_links_parser(soup):
+    # Returning all links that should be visited by the Crawler
+
+    href = []
+    listing = soup.findAll('div', {"class": "col-12 p-0"})
+
+    for a in listing:
+        bae = a.find('a', href=True)
+        link = bae['href']
+        href.append(link)
+
+    return href
\ No newline at end of file