diff --git a/MarketPlaces/DarkMarket/crawler_selenium.py b/MarketPlaces/DarkMarket/crawler_selenium.py
new file mode 100644
index 0000000..3ed52b7
--- /dev/null
+++ b/MarketPlaces/DarkMarket/crawler_selenium.py
@@ -0,0 +1,301 @@
+__author__ = 'Helium'
+
+'''
+DarkMarket Marketplace Crawler (Selenium)
+Not complete: still needs to page through multiple listing pages.
+'''
+
+from selenium import webdriver
+from selenium.common.exceptions import NoSuchElementException
+from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
+from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
+from selenium.webdriver.firefox.service import Service
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+from PIL import Image
+import urllib.parse as urlparse
+import os, re, time
+from datetime import date
+import subprocess
+import configparser
+from bs4 import BeautifulSoup
+from MarketPlaces.Initialization.prepare_parser import new_parse
+from MarketPlaces.DarkMarket.parser import darkmarket_links_parser
+from MarketPlaces.Utilities.utilities import cleanHTML
+
+counter = 1
+baseURL = 'http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/'
+
+
+# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
+# Acts as the main method for the crawler; crawler() at the end of this file calls it.
+def startCrawling():
+    mktName = getMKTName()
+    driver = getAccess()
+
+    if driver != 'down':
+        try:
+            crawlForum(driver)
+        except Exception as e:
+            print(driver.current_url, e)
+        closeDriver(driver)
+
+    new_parse(mktName, baseURL, True)
+
+
+# Returns the name of the website
+# return: name of site in string type
+def getMKTName():
+    name = 'DarkMarket'
+    return name
+
+
+# Returns the base link of the website
+# return: url of base site in string type
+def getFixedURL():
+    url = 'http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/'
+    return url
+
+
+# Closes Tor Browser
+# @param: current selenium driver
+def closeDriver(driver):
+    # global pid
+    # os.system("taskkill /pid " + str(pro.pid))
+    # os.system("taskkill /t /f /im tor.exe")
+    print('Closing Tor...')
+    driver.close()
+    time.sleep(3)
+    return
+
+
+# Creates the Firefox 'driver' and configures its 'Profile'
+# to use the Tor proxy and socket
+def createFFDriver():
+    from MarketPlaces.Initialization.markets_mining import config
+
+    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
+
+    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
+    ff_prof.set_preference("places.history.enabled", False)
+    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
+    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
+    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
+    ff_prof.set_preference("signon.rememberSignons", False)
+    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
+    ff_prof.set_preference("network.dns.disablePrefetch", True)
+    ff_prof.set_preference("network.http.sendRefererHeader", 0)
+    ff_prof.set_preference("permissions.default.image", 3)
+    ff_prof.set_preference("browser.download.folderList", 2)
+    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
+    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
+    ff_prof.set_preference('network.proxy.type', 1)
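+    # The four prefs below route all traffic through the local Tor SOCKS5 proxy.
+    # Port 9150 is the Tor Browser default; a standalone tor daemon usually
+    # listens on 9050 instead, so adjust the port if the connection fails.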
ff_prof.set_preference("network.proxy.socks_version", 5) + ff_prof.set_preference('network.proxy.socks', '127.0.0.1') + ff_prof.set_preference('network.proxy.socks_port', 9150) + ff_prof.set_preference('network.proxy.socks_remote_dns', True) + ff_prof.set_preference("javascript.enabled", False) + ff_prof.update_preferences() + + service = Service(config.get('TOR', 'geckodriver_path')) + + driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) + + driver.maximize_window() + + return driver + + +#the driver 'gets' the url, attempting to get on the site, if it can't access return 'down' +#return: return the selenium driver or string 'down' +def getAccess(): + url = getFixedURL() + driver = createFFDriver() + try: + driver.get(url) + return driver + except: + driver.close() + return 'down' + + + + +# Saves the crawled html page, makes the directory path for html pages if not made +def savePage(driver, page, url): + cleanPage = cleanHTML(driver, page) + filePath = getFullPathName(url) + os.makedirs(os.path.dirname(filePath), exist_ok=True) + open(filePath, 'wb').write(cleanPage.encode('utf-8')) + return + + +# Gets the full path of the page to be saved along with its appropriate file name +#@param: raw url as crawler crawls through every site +def getFullPathName(url): + from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE + + mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages") + fileName = getNameFromURL(url) + if isListingLink(url): + fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html') + elif isDescriptionLink(url): + fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html') + else: + fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html') + + return fullPath + + +# Creates the file name from passed URL, gives distinct name if can't be made unique after cleaned +#@param: raw url as crawler crawls through every site +def getNameFromURL(url): + global counter + name = ''.join(e for e in url if e.isalnum()) + if name == '': + name = str(counter) + counter = counter + 1 + return name + + +# returns list of urls, here is where you can list the different urls of interest, the crawler runs through this list +#in this example, there are a couple of categories some threads fall under such as +# Guides and Tutorials, Digital Products, and Software and Malware +#as you can see they are categories of products +def getInterestedLinks(): + links = [] + + #Home for testing the next button + #links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/') + #Softwares + links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/softwares/') + # Guides and Training + links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/guides-and-training/') + #Tools and Other Accounts + links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/tools-and-other-accounts/') + #PayPal Transfers + #links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/paypal-transfers/') + #Cash App Transfers + #links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/cash-app-transfers/') + #Bank logs + 
+
+
+# Returns the list of urls of interest; the crawler runs through this list.
+# In this example, the active categories are Softwares, Guides and Training,
+# and Tools and Other Accounts; the remaining product categories are kept
+# commented out for later runs.
+def getInterestedLinks():
+    links = []
+
+    # Home (for testing the next button)
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/')
+    # Softwares
+    links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/softwares/')
+    # Guides and Training
+    links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/guides-and-training/')
+    # Tools and Other Accounts
+    links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/tools-and-other-accounts/')
+    # PayPal Transfers
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/paypal-transfers/')
+    # Cash App Transfers
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/cash-app-transfers/')
+    # Bank Logs
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/bank-logs-%f0%9f%a4%91/')
+    # Carded Products
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/carded-products-%f0%9f%92%8e/')
+    # Cloned Cards
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/cloned-cards/')
+    # Credit Card CVV Pack
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/credit-card-cvv-pack/')
+    # Dumps
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/dumps/')
+    # Credit Cards Fullz CC
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/credit-cards-fullz-cc/')
+    # Equipment
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/equipment/')
+    # Gift Cards
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/gift-cards/')
+    # PayPal accounts with balance
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/paypal-accounts-with-ballance/')
+    # Visa Prepaid Cards
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/visa-prepaid-crads/')
+    # Western Union Transfers
+    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/western-union-transfer-%f0%9f%92%b1/')
+
+    return links
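+
+# The crawl loop below visits each category listing page, saves it, opens every
+# product link found on it, and then follows the "next page" link until none is
+# left. Listing pages are saved with a "page{n}" suffix so successive pages of
+# the same category do not overwrite each other.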
+
+
+# Gets the links of interest and crawls through them one by one; both listing
+# and description pages are saved along the way.
+# @param: selenium driver
+def crawlForum(driver):
+    print("Crawling the DarkMarket market")
+
+    linksToCrawl = getInterestedLinks()
+
+    i = 0
+    while i < len(linksToCrawl):
+        link = linksToCrawl[i]
+        print('Crawling :', link)
+        try:
+            has_next_page = True
+            count = 0
+
+            while has_next_page:
+                try:
+                    driver.get(link)
+                except:
+                    driver.refresh()
+                html = driver.page_source
+                savePage(driver, html, linksToCrawl[i] + f"page{count+1}")
+
+                productList = productPages(html)
+
+                for item in productList:
+                    # Resolve the (possibly relative) product link against the base URL
+                    itemURL = urlparse.urljoin(baseURL, str(item))
+                    try:
+                        driver.get(itemURL)
+                    except:
+                        driver.refresh()
+                    savePage(driver, driver.page_source, item)
+                    driver.back()
+
+                    # uncomment to crawl only the first product per page (testing)
+                    # break
+
+                # uncomment to crawl only the first two listing pages (testing)
+                # if count == 1:
+                #     break
+
+                # Finding the next page; the absolute XPath below has not been
+                # verified against live pages and may need adjusting.
+                try:
+                    link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/div/div/div[2]/div/div/main/nav/ul/li[3]/a').get_attribute('href')
+                    if link == "":
+                        raise NoSuchElementException
+                    count += 1
+
+                except NoSuchElementException:
+                    has_next_page = False
+
+        except Exception as e:
+            print(link, e)
+        i += 1
+
+    print("Crawling the DarkMarket market done.")
+
+
+# Returns 'True' if the link is a description (product) link
+# @param: url of any page crawled
+# return: True if it is a description page, False if not
+def isDescriptionLink(url):
+    if 'product' in url:
+        return True
+    return False
+
+
+# Returns 'True' if the link is a listing page link
+# @param: url of any page crawled
+# return: True if it is a listing page, False if not
+def isListingLink(url):
+    if 'product-category' in url:
+        return True
+    return False
+
+
+# Calls the parser to extract description links from a listing page
+# @param: html of a page from the interested-links list, i.e. getInterestedLinks()
+# return: list of description links that should be crawled through
+def productPages(html):
+    soup = BeautifulSoup(html, "html.parser")
+    return darkmarket_links_parser(soup)
+
+
+def crawler():
+    startCrawling()
+    # print("Crawling and Parsing DarkMarket .... DONE!")
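Because isDescriptionLink() matches any URL containing 'product', listing URLs satisfy both predicates. A minimal sanity check (the product URL is hypothetical; the category URL is real) shows why getFullPathName() must test isListingLink() first:

    listing = 'http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/softwares/'
    item = 'http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product/example-item/'  # hypothetical

    assert isListingLink(listing) and not isListingLink(item)
    assert isDescriptionLink(item)
    assert isDescriptionLink(listing)  # also True, hence the check order in getFullPathName()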
diff --git a/MarketPlaces/DarkMarket/parser.py b/MarketPlaces/DarkMarket/parser.py
new file mode 100644
index 0000000..34be872
--- /dev/null
+++ b/MarketPlaces/DarkMarket/parser.py
@@ -0,0 +1,223 @@
+__author__ = 'DarkWeb'
+
+# Here, we are importing the auxiliary functions to clean or convert data
+from MarketPlaces.Utilities.utilities import *
+
+# Here, we are importing BeautifulSoup to search through the HTML tree
+from bs4 import BeautifulSoup
+import re
+
+
+# Parses description pages: takes the html of a description page as a soup object
+# and extracts the info it needs into variables that are returned together.
+# @param: soup object of a description page's html
+# return: 'row' tuple that holds every field scraped from the description page
+def darkmarket_description_parser(soup):
+
+    # Fields to be parsed
+
+    vendor = "-1"                   # 0 *Vendor_Name
+    success = "-1"                  # 1 Vendor_Successful_Transactions
+    rating_vendor = "-1"            # 2 Vendor_Rating
+    name = "-1"                     # 3 *Product_Name
+    describe = "-1"                 # 4 Product_Description
+    CVE = "-1"                      # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
+    MS = "-1"                       # 6 Product_MS_Classification (Microsoft Security)
+    category = "-1"                 # 7 Product_Category
+    views = "-1"                    # 8 Product_Number_Of_Views
+    reviews = "-1"                  # 9 Product_Number_Of_Reviews
+    rating_item = "-1"              # 10 Product_Rating
+    addDate = "-1"                  # 11 Product_AddedDate
+    BTC = "-1"                      # 12 Product_BTC_SellingPrice
+    USD = "-1"                      # 13 Product_USD_SellingPrice
+    EURO = "-1"                     # 14 Product_EURO_SellingPrice
+    sold = "-1"                     # 15 Product_QuantitySold
+    left = "-1"                     # 16 Product_QuantityLeft
+    shipFrom = "-1"                 # 17 Product_ShippedFrom
+    shipTo = "-1"                   # 18 Product_ShippedTo
+    image = "-1"                    # 19 Product_Image
+    vendor_image = "-1"             # 20 Vendor_Image
+
+    # DarkMarket pages show no vendor
+    '''
+    divmd7 = soup.find('div', {'class': "col-md-7"})
+
+    # Finding Vendor
+    vendor = divmd7.find('a').text.strip()
+    '''
+
+    # Finding the Product name
+    name = soup.find('h1', {'class': "entry-title"}).text.strip()
+
+    # Finding Prices: the amount is the text node right after the currency-symbol span
+    price = soup.find('span', {'class': "woocommerce-Price-currencySymbol"})
+    if price is not None and price.next_sibling is not None:
+        USD = price.next_sibling.strip()
+
+    # Finding the Product Image
+    image = soup.find('img', {'class': 'wp-post-image'})
+    image = image.get('src')
+    image = image.split('base64,')[-1]
+
+    # Finding the Product description
+    describe = soup.find('div', {'class': "woocommerce-Tabs-panel--description"}).text
+    describe = cleanString(describe.strip())
+    # Gets rid of the leading 'Description' tab heading (the first 11 characters)
+    describe = describe[11:]
+
+    # Finding the Category
+    category = soup.find('a', {'rel': 'tag'}).text.strip()
+
+    # Finding the number of product reviews
+    reviews = soup.find('span', class_="count").text.strip()
+
+    # Finding the product rating
+    rating_item = soup.find('strong', {'class': 'rating'}).text.strip()
+
+    # Searching for CVE and MS categories
+    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
+    if cve:
+        CVE = " "
+        for idx in cve:
+            CVE += (idx)
+            CVE += " "
+            CVE = CVE.replace(',', ' ')
+            CVE = CVE.replace('\n', '')
+    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
+    if ms:
+        MS = " "
+        for im in ms:
+            MS += (im)
+            MS += " "
+            MS = MS.replace(',', ' ')
+            MS = MS.replace('\n', '')
+
+    # Populating the final variable (this should be a tuple with all fields scraped)
+    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
+           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)
+
+    # Sending the results
+    return row
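+
+# Both parsers read prices out of standard WooCommerce markup which, assuming the
+# default theme structure, looks roughly like:
+#   <span class="woocommerce-Price-amount amount">
+#       <span class="woocommerce-Price-currencySymbol">$</span>100.00
+#   </span>
+# so the dollar amount is the text node immediately after the currency-symbol span.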
+
+
+# Parses listing pages: takes the html of a listing page as a soup object and
+# extracts the info it needs into lists that are returned together.
+# @param: soup object of a listing page's html
+# return: 'row' that contains a variety of lists, each holding info from the listing page
+def darkmarket_listing_parser(soup):
+
+    # Fields to be parsed
+    nm = 0                          # *Total_Products (Should be Integer)
+    mktName = "DarkMarket"          # 0 *Marketplace_Name
+    vendor = []                     # 1 *Vendor y
+    rating_vendor = []              # 2 Vendor_Rating
+    success = []                    # 3 Vendor_Successful_Transactions
+    name = []                       # 4 *Product_Name y
+    CVE = []                        # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this
+    MS = []                         # 6 Product_MS_Classification (Microsoft Security) dont worry about this
+    category = []                   # 7 Product_Category y
+    describe = []                   # 8 Product_Description
+    views = []                      # 9 Product_Number_Of_Views
+    reviews = []                    # 10 Product_Number_Of_Reviews
+    rating_item = []                # 11 Product_Rating
+    addDate = []                    # 12 Product_AddDate
+    BTC = []                        # 13 Product_BTC_SellingPrice
+    USD = []                        # 14 Product_USD_SellingPrice y
+    EURO = []                       # 15 Product_EURO_SellingPrice
+    sold = []                       # 16 Product_QuantitySold
+    qLeft = []                      # 17 Product_QuantityLeft
+    shipFrom = []                   # 18 Product_ShippedFrom
+    shipTo = []                     # 19 Product_ShippedTo
+    image = []                      # 20 Product_Image
+    image_vendor = []               # 21 Vendor_Image
+    href = []                       # 22 Product_Links
+
+    listing = soup.findAll('li', {"class": ["type-product", "product"]})
+
+    # Populating the Number of Products
+    nm = len(listing)
+
+    for a in listing:
+        bae = a.findAll('a', href=True)
+
+        # Adding the url to the list of urls (Product Links)
+        link = bae[0].get('href')
+        href.append(link)
+
+        # Category: every product shares the listing page's title
+        temp_category = soup.find('h1', {'class': 'page-title'}).text.strip()
+        category.append(temp_category)
+
+        # Product Name
+        product = a.find('h2', {"class": "woocommerce-loop-product__title"}).text
+        product = product.replace('\n', ' ')
+        product = product.replace(",", "")
+        product = product.replace("...", "")
+        product = product.strip()
+        name.append(product)
+
+        # USD Price: the amount is the text node right after the currency-symbol span
+        price = a.find('span', {"class": "woocommerce-Price-currencySymbol"})
+        if price is not None and "$" in price.text:
+            tempUSD = price.next_sibling.strip()
+            USD.append(tempUSD)
+        else:
+            USD.append("-1")
+
+        # Product Image
+        product_image = a.find('img', class_='attachment-woocommerce_thumbnail')
+        product_image = product_image.get('src')
+        product_image = product_image.split('base64,')[-1]
+        image.append(product_image)
+
+        # Searching for CVE and MS categories
+        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
+        if not cve:
+            cveValue = "-1"
+        else:
+            cee = " "
+            for idx in cve:
+                cee += (idx)
+                cee += " "
+                cee = cee.replace(',', ' ')
+                cee = cee.replace('\n', '')
+            cveValue = cee
+        CVE.append(cveValue)
+
+        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
+        if not ms:
+            MSValue = "-1"
+        else:
+            me = " "
+            for im in ms:
+                me += (im)
+                me += " "
+                me = me.replace(',', ' ')
+                me = me.replace('\n', '')
+            MSValue = me
+        MS.append(MSValue)
+
+    # Populate the final variable (this should be a list with all fields scraped)
+    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
+                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
+                            image_vendor)
+
+
+# Called by the crawler to get description links on a listing page
+# @param: soup object of the correct html page (a listing page)
+# return: list of description links from a listing page
+def darkmarket_links_parser(soup):
+    # Returning all links that should be visited by the Crawler
+    href = []
+    listing = soup.find_all('li', class_=["product", "type-product"])
+
+    for a in listing:
+        bae = a.find('a', href=True)
+        link = bae['href']
+        href.append(link)
+
+    return href
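For a quick offline test, the links parser can be run against a page already saved by the crawler; the file name below is hypothetical (real pages are written by savePage() under the shared_folder tree):

    from bs4 import BeautifulSoup
    from MarketPlaces.DarkMarket.parser import darkmarket_links_parser

    # Hypothetical saved listing page produced by a previous crawl
    with open('listing_page1.html', 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f.read(), 'html.parser')

    for link in darkmarket_links_parser(soup):
        print(link)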