@@ -0,0 +1,300 @@
__author__ = 'DarkWeb' | |||
''' | |||
Go Fish market Crawler (Selenium) | |||
- this is a new marketplace that was up for only a few days; the crawler has not been finished
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.GoFish.parser import gofish_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
counter = 1 | |||
baseURL = 'http://gofishbybookb4a2kvviuygmwjqfxx7nqsovweogs2cxvqvexhe7edyd.onion/' | |||
# Opens Tor Browser, crawls the website, then parses, then closes tor | |||
# acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling(): | |||
# opentor() | |||
mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
new_parse(mktName, baseURL, True) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
from MarketPlaces.Initialization.markets_mining import config | |||
global pid | |||
print("Connecting Tor...") | |||
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path')) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
name = 'GoFish' | |||
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://gofishbybookb4a2kvviuygmwjqfxx7nqsovweogs2cxvqvexhe7edyd.onion/' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and SOCKS port
def createFFDriver(): | |||
from MarketPlaces.Initialization.markets_mining import config | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
# ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
# ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 1) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
# driver.maximize_window() | |||
return driver | |||
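# Note: port 9150 is the SOCKS port exposed by the Tor Browser bundle; a standalone tor
# daemon usually listens on 9050 instead. Adjust network.proxy.socks_port above if this
# crawler is pointed at a system tor service rather than Tor Browser.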
#the driver 'gets' the url, attempting to get on the site, if it can't access return 'down' | |||
#return: return the selenium driver or string 'down' | |||
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# Logs into the market: prompts for manual CAPTCHA solving in the browser, waits for the login
# form, fills in the username and password, then waits for the listing page to load
#@param: current selenium web driver | |||
def login(driver): | |||
input("Press ENTER when CAPTCHA is completed\n") | |||
# wait for page to show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, '//*[@id="username"]'))) | |||
# entering username and password into input boxes | |||
usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]') | |||
# Username here | |||
usernameBox.send_keys('itsmedio') | |||
passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]') | |||
# Password here | |||
passwordBox.send_keys('DementedBed123-') | |||
input("Press ENTER when CAPTCHA and exit pressed is completed\n") | |||
# wait for listing page show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, "/html/body/div/div[2]/div/div/div/div/div/div[1]/a/img"))) | |||
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(driver, page, url): | |||
cleanPage = cleanHTML(driver, page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE | |||
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages") | |||
fileName = getNameFromURL(url) | |||
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'/Description/' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'/Listing/' + fileName + '.html')
return fullPath | |||
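# Example of the resulting layout (the date shown is hypothetical): a listing page crawled on
# 2023-06-15 is saved under <shared_folder>/MarketPlaces/GoFish/HTML_Pages/2023-06-15/Listing/
# and a product description page under .../2023-06-15/Description/.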
# Creates the file name from the passed URL; falls back to a global counter if nothing alphanumeric is left after cleaning
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
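# Example (hypothetical URL): getNameFromURL('http://example.onion/?c=129') returns
# 'httpexampleonionc129'; every non-alphanumeric character is dropped, so distinct URLs can
# still collide, and the counter fallback only covers the case where nothing is left at all.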
# Returns the list of category URLs of interest; the crawler iterates through this list.
# The product categories currently crawled on GoFish are Hacking, Malware, and Exploits.
def getInterestedLinks(): | |||
links = [] | |||
# Hacking | |||
links.append('http://gofishbybookb4a2kvviuygmwjqfxx7nqsovweogs2cxvqvexhe7edyd.onion/?c=129') | |||
# Malware | |||
links.append('http://gofishbybookb4a2kvviuygmwjqfxx7nqsovweogs2cxvqvexhe7edyd.onion/?c=97') | |||
# Exploits | |||
links.append('http://gofishbybookb4a2kvviuygmwjqfxx7nqsovweogs2cxvqvexhe7edyd.onion/?c=107') | |||
return links | |||
# Iterates through the links of interest; each listing page is crawled and saved, along with
# every product description page it links to
#@param: selenium driver | |||
def crawlForum(driver): | |||
print("Crawling the GoFish market") | |||
linksToCrawl = getInterestedLinks() | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
has_next_page = True | |||
count = 0 | |||
while has_next_page: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(driver, html, link) | |||
                productList = productPages(html)
                for item in productList:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver, driver.page_source, item) | |||
time.sleep(3) | |||
driver.back() | |||
                    # testing limit: only visit the first product on each page; comment out for a full crawl
                    break

                # testing limit: only crawl two listing pages per category; comment out for a full crawl
                if count == 1:
                    break
try: | |||
link = driver.find_element(by=By.LINK_TEXT, value='Next ›').get_attribute('href') | |||
if link == "": | |||
raise NoSuchElementException | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
print("Crawling the GoFishMarket done.") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url):
    # category listing pages carry the ?c= query parameter; everything else is treated as a
    # product description page
    return not isListingLink(url)
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url):
    # category listing URLs look like .../?c=<category id> (see getInterestedLinks)
    return '?c=' in url
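# Example, using the Hacking category from getInterestedLinks():
#   isListingLink(baseURL + '?c=129')      -> True
#   isDescriptionLink(baseURL + '?c=129')  -> False (it is simply the negation)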
# Calls the parser to extract the description links from a listing page
#@param: html page source of a listing page reached via getInterestedLinks()
#return: list of description links that should be crawled through | |||
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return gofish_links_parser(soup) | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BestCardingWorld .... DONE!") |
@@ -0,0 +1,327 @@
__author__ = 'DarkWeb' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
# Parses description pages: takes the BeautifulSoup object of a product description page and
# extracts the fields listed below, returning them organized in a single tuple
#@param: soup object looking at html page of description page
#return: 'row' tuple that holds every field scraped from the description page
def gofish_description_parser(soup): | |||
# Fields to be parsed | |||
vendor = "-1" # 0 *Vendor_Name | |||
success = "-1" # 1 Vendor_Successful_Transactions | |||
rating_vendor = "-1" # 2 Vendor_Rating | |||
name = "-1" # 3 *Product_Name | |||
describe = "-1" # 4 Product_Description | |||
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about that much | |||
MS = "-1" # 6 Product_MS_Classification (Microsoft Security) dont worry about that much | |||
category = "-1" # 7 Product_Category | |||
views = "-1" # 8 Product_Number_Of_Views | |||
reviews = "-1" # 9 Product_Number_Of_Reviews | |||
rating_item = "-1" # 10 Product_Rating | |||
addDate = "-1" # 11 Product_AddedDate | |||
BTC = "-1" # 12 Product_BTC_SellingPrice | |||
USD = "-1" # 13 Product_USD_SellingPrice | |||
EURO = "-1" # 14 Product_EURO_SellingPrice | |||
sold = "-1" # 15 Product_QuantitySold | |||
left = "-1" # 16 Product_QuantityLeft | |||
shipFrom = "-1" # 17 Product_ShippedFrom | |||
shipTo = "-1" # 18 Product_ShippedTo | |||
#vendor name | |||
try: | |||
temp = soup.find('div', {'class': 'box rounded mb-0'}).find('a').text | |||
vendor = (cleanString(temp.strip())) | |||
except: | |||
vendor = "-1" | |||
#successful transaction | |||
try: | |||
temp = soup.findAll('div', {'class','text-center text-truncate column-flex ml-1 mr-1'}) #card sidebar-menu mb-4 card sidebar-menu mb-4 | |||
temp2 = temp[1].findAll('span', {'class', 'float-right font-weight-bold'}) | |||
temp = temp2[1].text | |||
success = (temp.strip()) | |||
except: | |||
print("success") | |||
#vendor rating 5 | |||
try: | |||
temp = soup.findAll('div', {'class', 'text-center text-truncate column-flex ml-1 mr-1'}) # card sidebar-menu mb-4 card sidebar-menu mb-4 | |||
temp2 = temp[1].findAll('span', {'class', 'float-right font-weight-bold'}) | |||
temp = temp2[5].text | |||
rating_vendor = (cleanString(temp.strip())) | |||
except: | |||
print("product") | |||
# product name | |||
try: | |||
temp = soup.find('h3', {'class', 'h3 rounded card-title'}).find('span').text | |||
name = (cleanString(temp.strip())) | |||
except: | |||
temp = soup.find('h3', {'class', 'h3 rounded card-title'}).find('span').find("div").text | |||
name = (cleanString(temp.strip())) | |||
# product description | |||
describe = soup.find('div', {'class': "box rounded flex-fill"}).find('pre').text | |||
if "\n" in describe: | |||
describe = describe.replace("\n", " ") | |||
describe = describe.replace("\r", " ") | |||
describe = cleanString(describe.strip()) | |||
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about that much | |||
MS = "-1" # 6 Product_MS_Classification (Microsoft Security) dont worry about that much | |||
# product category | |||
try: | |||
temp = soup.findAll('table', {'class', 'table table-hover'}) | |||
temp2 = temp[1].find('tr').findAll('td') | |||
temp = temp2[1].text | |||
category = cleanString(temp.strip()) | |||
except: | |||
temp = soup.find('table', {'class', 'table table-hover'}) | |||
temp2 = temp.find('tbody').find('tr').findAll('td') | |||
temp = temp2[1].text | |||
category = cleanString(temp.strip()) | |||
# product number of view | |||
try: | |||
temp = soup.find('div', {'class', 'box rounded mb-0'}) | |||
temp2 = temp.findAll('i') | |||
temp = temp2[2].text | |||
views = cleanString((temp.strip())) | |||
except: | |||
print('Product number of view') | |||
reviews = "-1" # 9 Product_Number_Of_Reviews | |||
rating_item = "-1" # 10 Product_Rating | |||
addDate = "-1" # 11 Product_AddedDate | |||
#BTC selling price box box-rounded mt-2 | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded mt-2'}) | |||
temp2 = temp.findAll('i', {'class', 'float-right color-prices'}) | |||
temp = temp2[1].text | |||
BTC = cleanString((temp.strip())) | |||
except: | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded'}) | |||
temp2 = temp.findAll('span', {'class', 'float-right color-prices'}) | |||
temp = temp2[1].text | |||
BTC = cleanString((temp.strip())) | |||
except: | |||
print("BTC") | |||
# USD selling price | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded mt-2'}) | |||
temp2 = temp.findAll('center') | |||
temp = temp2[1].find('i').text | |||
if "$" in temp: | |||
temp = temp.replace("$", "") | |||
USD = cleanString((temp.strip())) | |||
except: | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded'}) | |||
temp2 = temp.findAll('center') | |||
temp = temp2[1].find('span').text | |||
if "$" in temp: | |||
temp = temp.replace("$", "") | |||
USD = cleanString((temp.strip())) | |||
except: | |||
print("USD") | |||
EURO = "-1" # 14 Product_EURO_SellingPrice | |||
# product sold | |||
try: | |||
temp = soup.find('div', {'class', 'box rounded mb-0'}) # card sidebar-menu mb-4 card sidebar-menu mb-4 | |||
temp2 = temp.find('i') | |||
temp = temp2.text | |||
sold = (cleanString(temp.strip())) | |||
# sold = "-1" | |||
except: | |||
print("product sold") | |||
    # product quantity left ###ERROR
try: | |||
temp = soup.findAll('table', {'class', 'table table-hover'}) | |||
temp2 = temp[1].findAll('tr') | |||
temp3 = temp2[1].findAll('td') | |||
temp = temp3[1].text | |||
left = cleanString(temp.strip()) | |||
except: | |||
temp = soup.find('table', {'class', 'table table-hover'}) | |||
temp2 = temp.findAll('tr') | |||
temp3 = temp2[1].findAll('td') | |||
temp = temp3[1].text | |||
left = cleanString(temp.strip()) | |||
shipFrom = "-1" # 17 Product_ShippedFrom | |||
shipTo = "-1" # 18 Product_ShippedTo | |||
    # Populating the final variable (a tuple with all scraped fields)
row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate, | |||
BTC, USD, EURO, sold, left, shipFrom, shipTo) | |||
# Sending the results | |||
return row | |||
#parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs | |||
#stores info it needs in different lists, these lists are returned after being organized | |||
#@param: soup object looking at html page of listing page | |||
#return: 'row' that contains a variety of lists that each hold info on the listing page | |||
def gofish_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # *Total_Products (Should be Integer) | |||
mktName = "GoFish" # 0 *Marketplace_Name | |||
vendor = [] # 1 *Vendor y | |||
rating_vendor = [] # 2 Vendor_Rating | |||
success = [] # 3 Vendor_Successful_Transactions | |||
name = [] # 4 *Product_Name y | |||
CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this | |||
MS = [] # 6 Product_MS_Classification (Microsoft Security) dont worry about this | |||
category = [] # 7 Product_Category y | |||
describe = [] # 8 Product_Description | |||
views = [] # 9 Product_Number_Of_Views | |||
reviews = [] # 10 Product_Number_Of_Reviews | |||
rating_item = [] # 11 Product_Rating | |||
addDate = [] # 12 Product_AddDate | |||
BTC = [] # 13 Product_BTC_SellingPrice | |||
USD = [] # 14 Product_USD_SellingPrice y | |||
EURO = [] # 15 Product_EURO_SellingPrice | |||
sold = [] # 16 Product_QuantitySold | |||
qLeft = [] # 17 Product_QuantityLeft | |||
shipFrom = [] # 18 Product_ShippedFrom | |||
shipTo = [] # 19 Product_ShippedTo | |||
href = [] # 20 Product_Links | |||
listing = soup.findAll('div', {"class": "card mt-1"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
# vendor | |||
try: | |||
temp = a.find('div', {'class','col-5 justify-content-between mx-auto'}).find('a').text | |||
vendor.append(cleanString(temp.strip())) | |||
except: | |||
print('vendor') | |||
        # vendor rating: not shown on the listing card; appended as -1 further below
        # successful transactions: CHECK AGAIN HERE - not located on the listing card yet
        success.append("-1")
# product name | |||
try: | |||
temp = a.find('h5', {'class','card-title rounded text-truncate'}).find('a').text | |||
name.append(cleanString(temp.strip())) | |||
except: | |||
print('product name') | |||
CVE.append('-1') | |||
MS.append('-1') | |||
rating_vendor.append('-1') | |||
# product category | |||
try: | |||
temp = soup.find('div', {'class', 'card-sidebar-menu box mb-2 flex-column'}).find('h3').find('span').text | |||
if "Search Results for: " in temp: | |||
temp = temp.replace("Search Results for: ", "") | |||
category.append(cleanString(temp.strip())) | |||
except: | |||
print("Error in product category") | |||
describe.append('-1') | |||
# product views | |||
try: | |||
temp = a.find('h6',{'class', 'card-subtitle mb-1 text-muted text-truncate'}) | |||
temp2 = temp.find('i').text | |||
views.append(cleanString(temp2.strip())) | |||
except: | |||
print("Error in views") | |||
reviews.append('-1') # 10 Product_Number_Of_Reviews | |||
rating_item.append('-1') # 11 Product_Rating | |||
addDate.append('-1') # 12 Product_AddDate | |||
# BTC | |||
try: | |||
temp = a.find('div', {'class', 'col-3 justify-content-between mx-auto'}) | |||
temp2 = temp.findAll('p') | |||
temp = temp2[1].text | |||
BTC.append(cleanString(temp.strip())) | |||
except: | |||
print("BTC") | |||
        # USD (strip the leading $ before storing)
try: | |||
temp = a.find('div', {'class', 'col-12 justify-content-between mx-auto'}).find('i').text | |||
if '$' in temp: | |||
temp = temp.replace("$", "") | |||
USD.append(cleanString(temp.strip())) # 14 Product_USD_SellingPrice | |||
except: | |||
print("USD") | |||
EURO.append("-1") # 15 Product_EURO_SellingPrice | |||
#product sold | |||
try: | |||
temp = a.find('div', {'class', 'col-12 mx-auto text-truncate text-center flex-fill'}).findAll('p', {'class', 'card-text mb-0'}) | |||
temp2 = temp[1].find('i').text | |||
sold.append(cleanString(temp2.strip())) | |||
except: | |||
print("product sold") | |||
qLeft.append('-1') # 17 Product_QuantityLeft | |||
shipFrom.append('-1') # 18 Product_ShippedFrom | |||
shipTo.append('-1') # 19 Product_ShippedTo | |||
#href | |||
try: | |||
temp = a.find('h5', {'class', 'card-title rounded text-truncate'}).find('a').get('href') | |||
href.append(temp) # 20 Product_Links | |||
except: | |||
print("href") | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views, | |||
reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href) | |||
#called by the crawler to get description links on a listing page | |||
#@param: beautifulsoup object that is using the correct html page (listing page) | |||
#return: list of description links from a listing page | |||
def gofish_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.findAll('div', {"class": "card mt-1"}) | |||
for a in listing: | |||
bae = a.find('a', href=True)#card-title rounded text-truncate | |||
link = bae['href'] | |||
href.append(link) | |||
return href |
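# Minimal sketch of how these parsers can be exercised against a previously saved listing page
# without running the crawler (the sample file name below is hypothetical):
if __name__ == '__main__':
    with open('gofish_listing_sample.html', 'r', encoding='utf-8') as f:
        sampleSoup = BeautifulSoup(f.read(), 'html.parser')
    print(gofish_links_parser(sampleSoup))
    print(gofish_listing_parser(sampleSoup))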
@@ -0,0 +1,308 @@
__author__ = 'DarkWeb' | |||
''' | |||
Torzon Market Crawler (Selenium) | |||
- there is a problem extracting description links from the HTML structure; continue work on this
  crawler by fixing torzon_links_parser()
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.Torzon.parser import torzon_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
counter = 1 | |||
BASE_URL = 'http://torzon4kv5swfazrziqvel2imhxcckc4otcvopiv5lnxzpqu4v4m5iyd.onion' | |||
# Opens Tor Browser, crawls the website, then parses, then closes tor | |||
# acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling(): | |||
opentor() | |||
mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
new_parse(mktName, BASE_URL, False) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
from MarketPlaces.Initialization.markets_mining import config | |||
global pid | |||
print("Connecting Tor...") | |||
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path')) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
name = 'Torzon' | |||
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://torzon4kv5swfazrziqvel2imhxcckc4otcvopiv5lnxzpqu4v4m5iyd.onion' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and SOCKS port
def createFFDriver(): | |||
from MarketPlaces.Initialization.markets_mining import config | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
# ff_prof.set_preference("network.dns.disablePrefetch", True)#connection issue | |||
# ff_prof.set_preference("network.http.sendRefererHeader", 0)#connection issue | |||
ff_prof.set_preference("permissions.default.image", 1) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
return driver | |||
#the driver 'gets' the url, attempting to get on the site, if it can't access return 'down' | |||
#return: return the selenium driver or string 'down' | |||
def getAccess(): | |||
# url = getFixedURL() | |||
    driver = createFFDriver()  # BASE_URL is used directly in driver.get() below
try: | |||
driver.get(BASE_URL) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# Manual login helper: the CAPTCHA (and any login form) is solved by hand in the browser; the
# crawler simply waits for ENTER in the terminal before continuing
#@param: current selenium web driver | |||
def login(driver): | |||
input("Press ENTER when CAPTCHA is completed and page is loaded\n") | |||
# wait for page to show up (This Xpath may need to change based on different seed url) | |||
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(page, url): | |||
cleanPage = cleanHTML(page) | |||
filePath = getFullPathName(url) | |||
# filePath = getFullPathName("Hello") | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
with open(filePath, 'wb') as file: | |||
file.write(cleanPage.encode('utf-8')) | |||
# open(filePath, 'wb').write(cleanPage.encode('utf-8')) | |||
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE | |||
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages") | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = os.path.join(mainDir, CURRENT_DATE + r'/Description/' + fileName + '.html') | |||
else: | |||
fullPath = os.path.join(mainDir, CURRENT_DATE + r'/Listing/' + fileName + '.html') | |||
return fullPath | |||
# Creates the file name from the passed URL; falls back to a global counter if nothing alphanumeric is left after cleaning
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
# Returns the list of category URLs of interest; the crawler iterates through this list.
# The product categories currently targeted on Torzon are Services, Software & Malware, Fraud,
# and Guides and Tutorials.
def getInterestedLinks(): | |||
links = [] | |||
# # services | |||
links.append('http://torzon4kv5swfazrziqvel2imhxcckc4otcvopiv5lnxzpqu4v4m5iyd.onion/products.php?sta=1&shipsto=All&shipsfrom=All&category=Services&small=0&big=5000000&id=1995441210213618738586452129269668912607120977870') | |||
    # # software & malware (TODO: the URL below still carries the Services category/id and needs updating)
links.append('http://torzon4kv5swfazrziqvel2imhxcckc4otcvopiv5lnxzpqu4v4m5iyd.onion/products.php?sta=1&shipsto=All&shipsfrom=All&category=Services&small=0&big=5000000&id=1995441210213618738586452129269668912607120977870') | |||
    # # fraud (TODO: the URL below still carries the Services category/id and needs updating)
links.append('http://torzon4kv5swfazrziqvel2imhxcckc4otcvopiv5lnxzpqu4v4m5iyd.onion/products.php?sta=1&shipsto=All&shipsfrom=All&category=Services&small=0&big=5000000&id=1995441210213618738586452129269668912607120977870') | |||
# # guides | |||
links.append('http://torzon4kv5swfazrziqvel2imhxcckc4otcvopiv5lnxzpqu4v4m5iyd.onion/products.php?sta=1&shipsto=All&shipsfrom=All&category=Guides and Tutorials&small=0&big=5000000&id=75026212163304997524932260388151806190538071909089') | |||
return links | |||
# Iterates through the links of interest; each listing page is crawled and saved, along with
# every product description page it links to
#@param: selenium driver | |||
def crawlForum(driver): | |||
print("Crawling the Torzon market") | |||
linksToCrawl = getInterestedLinks() | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
has_next_page = True | |||
count = 0 | |||
while has_next_page: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
                productList = productPages(html)
                for item in productList:
itemURL = urlparse.urljoin(BASE_URL, str(item)) | |||
try: | |||
time.sleep(1.5) # to keep from detecting click speed | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver.page_source, item) | |||
time.sleep(1.5) | |||
driver.back() | |||
# to keep from detecting click speed | |||
# # comment out | |||
# break | |||
# | |||
# # comment out | |||
# if count == 1: | |||
# break | |||
try: | |||
# nav = driver.find_element(by=By.XPATH, value='/html/body/table[1]/tbody/tr/td/form/div/div[2]/table[2]') | |||
# a = nav.find_element(by=By.LINK_TEXT, value=">") | |||
link = driver.find_element(by=By.LINK_TEXT, value=">").get_attribute('href') | |||
if link == "": | |||
raise NoSuchElementException | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
# raise e | |||
i += 1 | |||
input("Crawling Torzon market done sucessfully. Press ENTER to continue\n") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url): | |||
if 'products/' in url and '/products/?category' not in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url):
    # the seed listing URLs from getInterestedLinks() carry a category= query parameter
    return 'category=' in url
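# Example, using the Services seed URL from getInterestedLinks():
#   isListingLink(BASE_URL + '/products.php?sta=1&shipsto=All&shipsfrom=All&category=Services&...') -> True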
# Calls the parser to extract the description links from a listing page
#@param: html page source of a listing page reached via getInterestedLinks()
#return: list of description links that should be crawled through | |||
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return torzon_links_parser(soup) | |||
# Drop links that "signout" | |||
# def isSignOut(url): | |||
# #absURL = urlparse.urljoin(url.base_url, url.url) | |||
# if 'signout' in url.lower() or 'logout' in url.lower(): | |||
# return True | |||
# | |||
# return False | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BestCardingWorld .... DONE!") |
@@ -0,0 +1,328 @@
__author__ = 'Helium' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
# Parses description pages: takes the BeautifulSoup object of a product description page and
# extracts the fields listed below, returning them organized in a single tuple
#@param: soup object looking at html page of description page
#return: 'row' tuple that holds every field scraped from the description page
def torzon_description_parser(soup): | |||
# Fields to be parsed | |||
vendor = "-1" # 0 *Vendor_Name | |||
success = "-1" # 1 Vendor_Successful_Transactions | |||
rating_vendor = "-1" # 2 Vendor_Rating | |||
name = "-1" # 3 *Product_Name | |||
describe = "-1" # 4 Product_Description | |||
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about that much | |||
MS = "-1" # 6 Product_MS_Classification (Microsoft Security) dont worry about that much | |||
category = "-1" # 7 Product_Category | |||
views = "-1" # 8 Product_Number_Of_Views | |||
reviews = "-1" # 9 Product_Number_Of_Reviews | |||
rating_item = "-1" # 10 Product_Rating | |||
addDate = "-1" # 11 Product_AddedDate | |||
BTC = "-1" # 12 Product_BTC_SellingPrice | |||
USD = "-1" # 13 Product_USD_SellingPrice | |||
EURO = "-1" # 14 Product_EURO_SellingPrice | |||
sold = "-1" # 15 Product_QuantitySold | |||
left = "-1" # 16 Product_QuantityLeft | |||
shipFrom = "-1" # 17 Product_ShippedFrom | |||
shipTo = "-1" # 18 Product_ShippedTo | |||
#vendor name | |||
try: | |||
temp = soup.find('div', {'class': 'box rounded mb-0'}).find('a').text | |||
vendor = (cleanString(temp.strip())) | |||
except: | |||
vendor = "-1" | |||
#successful transaction | |||
try: | |||
temp = soup.findAll('div', {'class','text-center text-truncate column-flex ml-1 mr-1'}) #card sidebar-menu mb-4 card sidebar-menu mb-4 | |||
temp2 = temp[1].findAll('span', {'class', 'float-right font-weight-bold'}) | |||
temp = temp2[1].text | |||
success = (temp.strip()) | |||
except: | |||
print("success") | |||
#vendor rating 5 | |||
try: | |||
temp = soup.findAll('div', {'class', 'text-center text-truncate column-flex ml-1 mr-1'}) # card sidebar-menu mb-4 card sidebar-menu mb-4 | |||
temp2 = temp[1].findAll('span', {'class', 'float-right font-weight-bold'}) | |||
temp = temp2[5].text | |||
rating_vendor = (cleanString(temp.strip())) | |||
except: | |||
print("product") | |||
# product name | |||
try: | |||
temp = soup.find('h3', {'class', 'h3 rounded card-title'}).find('span').text | |||
name = (cleanString(temp.strip())) | |||
except: | |||
temp = soup.find('h3', {'class', 'h3 rounded card-title'}).find('span').find("div").text | |||
name = (cleanString(temp.strip())) | |||
# product description | |||
describe = soup.find('div', {'class': "box rounded flex-fill"}).find('pre').text | |||
if "\n" in describe: | |||
describe = describe.replace("\n", " ") | |||
describe = describe.replace("\r", " ") | |||
describe = cleanString(describe.strip()) | |||
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about that much | |||
MS = "-1" # 6 Product_MS_Classification (Microsoft Security) dont worry about that much | |||
# product category | |||
try: | |||
temp = soup.findAll('table', {'class', 'table table-hover'}) | |||
temp2 = temp[1].find('tr').findAll('td') | |||
temp = temp2[1].text | |||
category = cleanString(temp.strip()) | |||
except: | |||
temp = soup.find('table', {'class', 'table table-hover'}) | |||
temp2 = temp.find('tbody').find('tr').findAll('td') | |||
temp = temp2[1].text | |||
category = cleanString(temp.strip()) | |||
# product number of view | |||
try: | |||
temp = soup.find('div', {'class', 'box rounded mb-0'}) | |||
temp2 = temp.findAll('i') | |||
temp = temp2[2].text | |||
views = cleanString((temp.strip())) | |||
except: | |||
print('Product number of view') | |||
reviews = "-1" # 9 Product_Number_Of_Reviews | |||
rating_item = "-1" # 10 Product_Rating | |||
addDate = "-1" # 11 Product_AddedDate | |||
#BTC selling price box box-rounded mt-2 | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded mt-2'}) | |||
temp2 = temp.findAll('i', {'class', 'float-right color-prices'}) | |||
temp = temp2[1].text | |||
BTC = cleanString((temp.strip())) | |||
except: | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded'}) | |||
temp2 = temp.findAll('span', {'class', 'float-right color-prices'}) | |||
temp = temp2[1].text | |||
BTC = cleanString((temp.strip())) | |||
except: | |||
print("BTC") | |||
# USD selling price | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded mt-2'}) | |||
temp2 = temp.findAll('center') | |||
temp = temp2[1].find('i').text | |||
if "$" in temp: | |||
temp = temp.replace("$", "") | |||
USD = cleanString((temp.strip())) | |||
except: | |||
try: | |||
temp = soup.find('div', {'class', 'box box-rounded'}) | |||
temp2 = temp.findAll('center') | |||
temp = temp2[1].find('span').text | |||
if "$" in temp: | |||
temp = temp.replace("$", "") | |||
USD = cleanString((temp.strip())) | |||
except: | |||
print("USD") | |||
EURO = "-1" # 14 Product_EURO_SellingPrice | |||
# product sold | |||
try: | |||
temp = soup.find('div', {'class', 'box rounded mb-0'}) # card sidebar-menu mb-4 card sidebar-menu mb-4 | |||
temp2 = temp.find('i') | |||
temp = temp2.text | |||
sold = (cleanString(temp.strip())) | |||
# sold = "-1" | |||
except: | |||
print("product sold") | |||
    # product quantity left ###ERROR
try: | |||
temp = soup.findAll('table', {'class', 'table table-hover'}) | |||
temp2 = temp[1].findAll('tr') | |||
temp3 = temp2[1].findAll('td') | |||
temp = temp3[1].text | |||
left = cleanString(temp.strip()) | |||
except: | |||
temp = soup.find('table', {'class', 'table table-hover'}) | |||
temp2 = temp.findAll('tr') | |||
temp3 = temp2[1].findAll('td') | |||
temp = temp3[1].text | |||
left = cleanString(temp.strip()) | |||
shipFrom = "-1" # 17 Product_ShippedFrom | |||
shipTo = "-1" # 18 Product_ShippedTo | |||
    # Populating the final variable (a tuple with all scraped fields)
row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate, | |||
BTC, USD, EURO, sold, left, shipFrom, shipTo) | |||
# Sending the results | |||
return row | |||
#parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs | |||
#stores info it needs in different lists, these lists are returned after being organized | |||
#@param: soup object looking at html page of listing page | |||
#return: 'row' that contains a variety of lists that each hold info on the listing page | |||
def torzon_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # *Total_Products (Should be Integer) | |||
mktName = "Torzon" # 0 *Marketplace_Name | |||
vendor = [] # 1 *Vendor y | |||
rating_vendor = [] # 2 Vendor_Rating | |||
success = [] # 3 Vendor_Successful_Transactions | |||
name = [] # 4 *Product_Name y | |||
CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this | |||
MS = [] # 6 Product_MS_Classification (Microsoft Security) dont worry about this | |||
category = [] # 7 Product_Category y | |||
describe = [] # 8 Product_Description | |||
views = [] # 9 Product_Number_Of_Views | |||
reviews = [] # 10 Product_Number_Of_Reviews | |||
rating_item = [] # 11 Product_Rating | |||
addDate = [] # 12 Product_AddDate | |||
BTC = [] # 13 Product_BTC_SellingPrice | |||
USD = [] # 14 Product_USD_SellingPrice y | |||
EURO = [] # 15 Product_EURO_SellingPrice | |||
sold = [] # 16 Product_QuantitySold | |||
qLeft = [] # 17 Product_QuantityLeft | |||
shipFrom = [] # 18 Product_ShippedFrom | |||
shipTo = [] # 19 Product_ShippedTo | |||
href = [] # 20 Product_Links | |||
listing = soup.findAll('div', {"class": "card mt-1"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
# vendor | |||
try: | |||
temp = a.find('div', {'class','col-5 justify-content-between mx-auto'}).find('a').text | |||
vendor.append(cleanString(temp.strip())) | |||
except: | |||
print('vendor') | |||
        # vendor rating: not shown on the listing card; appended as -1 further below
        # successful transactions: CHECK AGAIN HERE - not located on the listing card yet
        success.append("-1")
# product name | |||
try: | |||
temp = a.find('h5', {'class','card-title rounded text-truncate'}).find('a').text | |||
name.append(cleanString(temp.strip())) | |||
except: | |||
print('product name') | |||
CVE.append('-1') | |||
MS.append('-1') | |||
rating_vendor.append('-1') | |||
# product category | |||
try: | |||
temp = soup.find('div', {'class', 'card-sidebar-menu box mb-2 flex-column'}).find('h3').find('span').text | |||
if "Search Results for: " in temp: | |||
temp = temp.replace("Search Results for: ", "") | |||
category.append(cleanString(temp.strip())) | |||
except: | |||
print("Error in product category") | |||
describe.append('-1') | |||
# product views | |||
try: | |||
temp = a.find('h6',{'class', 'card-subtitle mb-1 text-muted text-truncate'}) | |||
temp2 = temp.find('i').text | |||
views.append(cleanString(temp2.strip())) | |||
except: | |||
print("Error in views") | |||
reviews.append('-1') # 10 Product_Number_Of_Reviews | |||
rating_item.append('-1') # 11 Product_Rating | |||
addDate.append('-1') # 12 Product_AddDate | |||
# BTC | |||
try: | |||
temp = a.find('div', {'class', 'col-3 justify-content-between mx-auto'}) | |||
temp2 = temp.findAll('p') | |||
temp = temp2[1].text | |||
BTC.append(cleanString(temp.strip())) | |||
except: | |||
print("BTC") | |||
        # USD (strip the leading $ before storing)
try: | |||
temp = a.find('div', {'class', 'col-12 justify-content-between mx-auto'}).find('i').text | |||
if '$' in temp: | |||
temp = temp.replace("$", "") | |||
USD.append(cleanString(temp.strip())) # 14 Product_USD_SellingPrice | |||
except: | |||
print("USD") | |||
EURO.append("-1") # 15 Product_EURO_SellingPrice | |||
#product sold | |||
try: | |||
temp = a.find('div', {'class', 'col-12 mx-auto text-truncate text-center flex-fill'}).findAll('p', {'class', 'card-text mb-0'}) | |||
temp2 = temp[1].find('i').text | |||
sold.append(cleanString(temp2.strip())) | |||
except: | |||
print("product sold") | |||
qLeft.append('-1') # 17 Product_QuantityLeft | |||
shipFrom.append('-1') # 18 Product_ShippedFrom | |||
shipTo.append('-1') # 19 Product_ShippedTo | |||
#href | |||
try: | |||
temp = a.find('h5', {'class', 'card-title rounded text-truncate'}).find('a').get('href') | |||
href.append(temp) # 20 Product_Links | |||
except: | |||
print("href") | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views, | |||
reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href) | |||
#called by the crawler to get description links on a listing page | |||
#@param: beautifulsoup object that is using the correct html page (listing page) | |||
#return: list of description links from a listing page | |||
def torzon_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
# listing = soup.findAll('div', {"class": "card mt-1"}) | |||
listing = soup.find('td', {"valign": "top"}).find("table", {"border": "0"}).findAll('td', {'width': '50%'}) | |||
for a in listing: | |||
        bae = a.find('a', href=True)  # card-title rounded text-truncate
        if bae is None:
            continue
        href.append(bae['href'])
return href |
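# torzon_links_parser above is the piece flagged as unfinished in the crawler docstring. The
# function below is a more defensive sketch, not a confirmed fix: the 'products' substring
# filter is an assumption about Torzon's product URL format.
def torzon_links_parser_fallback(soup):
    href = []
    for anchor in soup.findAll('a', href=True):
        link = anchor['href']
        # keep links that look like product pages; skip category filter links and duplicates
        if 'products' in link and 'category=' not in link and link not in href:
            href.append(link)
    return href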