@@ -0,0 +1,312 @@
__author__ = 'DarkWeb' | |||
''' | |||
Abacus Marketplace Crawler (Selenium) | |||
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support.ui import Select | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.Abacus.parser import abacus_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
counter = 1 | |||
baseURL = 'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion' | |||
def startCrawling(): | |||
mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closeDriver(driver) | |||
# new_parse(mktName, baseURL, True) | |||
# Returns the name of the website | |||
def getMKTName(): | |||
name = 'Abacus' | |||
return name | |||
# Return the base link of the website | |||
def getFixedURL(): | |||
url = 'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion' | |||
return url | |||
# Closes Tor Browser | |||
def closeDriver(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver(): | |||
from MarketPlaces.Initialization.markets_mining import config | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
# ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
# ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 3) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
driver.maximize_window() | |||
return driver | |||
# The driver 'gets' the URL; if the site cannot be accessed, 'down' is returned
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
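# Logs into Abacus: waits for the user to pass the captcha, fills in the credentials, then waits for the anti-phishing step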
def login(driver): | |||
input("Press ENTER when CAPTCHA is complete and login page has loaded\n") | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, '/html/body/div/div/div[1]/div/form/div[3]/input[1]'))) | |||
# entering username and password into input boxes | |||
try: | |||
usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[1]/div/form/div[3]/input[1]') | |||
# Username here | |||
usernameBox.send_keys('ct1234') | |||
passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[1]/div/form/div[3]/input[2]') | |||
# Password here | |||
passwordBox.send_keys('DementedBed123-') | |||
except: | |||
usernameBox = driver.find_element(by=By.CSS_SELECTOR, value='input.border-solid:nth-child(2)') | |||
# Username here | |||
usernameBox.send_keys('ct1234') | |||
passwordBox = driver.find_element(by=By.CSS_SELECTOR, value='input.border-solid:nth-child(4)') | |||
# Password here | |||
passwordBox.send_keys('DementedBed123-') | |||
input("Press ENTER AFTER phishing is completed (there is a captcha first and then an antiphishing check)\n") | |||
# wait for listing page show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, '/html/body/div/div/div[2]/div/div[2]'))) | |||
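# Saves the given page's HTML (with scripts and styles cleaned) under the shared output folder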
def savePage(driver, page, url): | |||
cleanPage = cleanHTML(driver, page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
return | |||
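# Builds the full output path for a page, placing it under a Description or Listing subfolder based on the link type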
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE | |||
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages") | |||
fileName = getNameFromURL(url) | |||
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
return fullPath | |||
def getMKTName() -> str: | |||
name = 'Abacus' | |||
return name | |||
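# Derives a file name from the URL by keeping only its alphanumeric characters; falls back to a running counter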
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if name == '': | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
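# Seed category URLs to crawl; uncomment additional categories as needed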
def getInterestedLinks(): | |||
links = [] | |||
# botnets and malware | |||
links.append('http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=475756f633d0cc71f0c868bd&cats=2&s_quick=1') | |||
# # social engineering | |||
# links.append( | |||
# 'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=1c29a89f7a4022133cab877d&cats=2&s_quick=1') | |||
# digital | |||
links.append( | |||
'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=475756f633d0cc71f0c868bd&cats=2&s_quick=1') | |||
# # hacking | |||
# links.append( | |||
# 'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=a0773b3de70bdaca38acda2f&cats=2&s_quick=1') | |||
# # carding | |||
    # links.append('http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=1b17857dc74c11953df85c55&cats=2&s_quick=1')
return links | |||
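# Main crawl loop: saves each listing page, opens every product (description) page it links to, and follows pagination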
def crawlForum(driver): | |||
print("Crawling the Abacus market") | |||
linksToCrawl = getInterestedLinks() | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
has_next_page = True | |||
count = 0 | |||
while has_next_page: | |||
try: | |||
print('waiting ten seconds to avoid ddos check') | |||
time.sleep(10) | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(driver, html, link) | |||
                items = productPages(html)
                for item in items:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
print(itemURL) | |||
print('waiting 5 sec to avoid ddos check') | |||
time.sleep(5) | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver, driver.page_source, item) | |||
print('waiting 20 seconds to avoid ddos check') | |||
time.sleep(20) | |||
driver.back() | |||
                    # Testing: only visit the first product; comment out this break for a full crawl
                    break
                # Testing: stop after a few pages; uncomment to limit the crawl depth
                # if count == 3:
                #     break
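                # Pagination: the next-page <a> is the one containing the 'gg-chevron-right' icon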
try: | |||
chev = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[2]/div/div[3]/div[4]') | |||
a_tags = chev.find_elements(by=By.TAG_NAME, value='a') | |||
try: | |||
for a_tag in a_tags: | |||
try: | |||
temp = a_tag.find_element(by=By.CLASS_NAME, value='gg-chevron-right') | |||
except: | |||
temp = '' | |||
if temp: | |||
link = a_tag.get_attribute('href') | |||
print(link) | |||
if link == '#': | |||
link = '' | |||
break | |||
else: | |||
link = '' | |||
                    except:
                        try:
                            # Fallback: the second-to-last pager <a> should contain the next-page chevron
                            a_tag = a_tags[-2]
                            if a_tag.find_elements(by=By.CLASS_NAME, value='gg-chevron-right'):
                                link = a_tag.get_attribute('href')
                                if link == '#':
                                    link = ''
                            else:
                                link = ''
                        except:
                            link = ''
if link == "": | |||
raise NoSuchElementException | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
print("Crawling the Abacus market done.") | |||
# Returns 'True' if the link is a description (product) page link; may need to change for every website
def isDescriptionLink(url): | |||
if 'listing' in url: | |||
return True | |||
return False | |||
# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url): | |||
if 'search' in url: | |||
return True | |||
return False | |||
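# Extracts all product (description) links from a listing page's HTML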
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return abacus_links_parser(soup) | |||
def crawler(): | |||
startCrawling() |
@@ -0,0 +1,228 @@
__author__ = 'DarkWeb' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
# Parses a description page: takes the page's HTML as a soup object and extracts the info it needs.
# The scraped fields are organized into a single tuple and returned.
# @param: soup object of a description page
# return: 'row' tuple that holds every field scraped from the description page
def abacus_description_parser(soup): | |||
# Fields to be parsed | |||
vendor = "-1" # 0 *Vendor_Name | |||
success = "-1" # 1 Vendor_Successful_Transactions | |||
rating_vendor = "-1" # 2 Vendor_Rating | |||
name = "-1" # 3 *Product_Name | |||
describe = "-1" # 4 Product_Description | |||
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 6 Product_MS_Classification (Microsoft Security) | |||
category = "-1" # 7 Product_Category | |||
views = "-1" # 8 Product_Number_Of_Views | |||
reviews = "-1" # 9 Product_Number_Of_Reviews | |||
rating_item = "-1" # 10 Product_Rating | |||
addDate = "-1" # 11 Product_AddedDate | |||
BTC = "-1" # 12 Product_BTC_SellingPrice | |||
USD = "-1" # 13 Product_USD_SellingPrice | |||
EURO = "-1" # 14 Product_EURO_SellingPrice | |||
sold = "-1" # 15 Product_QuantitySold | |||
left = "-1" # 16 Product_QuantityLeft | |||
shipFrom = "-1" # 17 Product_ShippedFrom | |||
shipTo = "-1" # 18 Product_ShippedTo | |||
image = "-1" # 19 Product_Image | |||
vendor_image = "-1" # 20 Vendor_Image | |||
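    # Top-level containers: the product detail panel and the vendor info sidebar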
product_info = soup.find('div', {'class': '2xl:self-start w-full h-full'}) | |||
about_vendor = soup.find('div', {'class': 'px-0.5 py-1 flex flex-col items-center gap-2 text-[13px]'}) | |||
try: | |||
ven_temp = about_vendor.find('div', {'class': 'flex items-center gap-0.5'}).find('a').text | |||
ven_temp_list = ven_temp.split(' ') | |||
if len(ven_temp_list) > 1: | |||
ven_temp_list = ven_temp_list[:-1] | |||
ven_temp = ' '.join(ven_temp_list) | |||
vendor = cleanString(ven_temp.strip()) | |||
except: | |||
vendor = '-1' | |||
print(vendor) | |||
success = "-1" | |||
try: | |||
rating = about_vendor.find('div', {'class':'font-bold rounded px-2'}).text | |||
rating_list = rating.split(' ') | |||
rating = rating_list[0] | |||
rating = rating.replace('%', '') | |||
except: | |||
rating = '-1' | |||
rating_vendor = rating | |||
print(rating_vendor) | |||
    name = product_info.find('div', {'class': 'w-full flex gap-0.5 items-center border-solid border-0 border-border border-b group-hover:border-abacus2 text-sm font-bold justify-center 2xl:justify-start py-1 leading-tight'}).text
    name = cleanString(name.strip())
try: | |||
description = soup.find('div', {'class': 'hidden gap-2 flex-col w-0 p-3 anim anim-FadeIn'}).find('div', {'class':'text-xs w-full text-left text-black'}).text | |||
description = cleanString(description.strip()) | |||
except: | |||
description = '-1' | |||
describe = description | |||
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 6 Product_MS_Classification (Microsoft Security) | |||
category = "-1" # 7 Product_Category | |||
views = "-1" # 8 Product_Number_Of_Views | |||
reviews = "-1" # 9 Product_Number_Of_Reviews | |||
rating_item = "-1" # 10 Product_Rating | |||
addDate = "-1" # 11 Product_AddedDate | |||
BTC = "-1" # 12 Product_BTC_SellingPrice | |||
USD = "-1" # 13 Product_USD_SellingPrice | |||
EURO = "-1" # 14 Product_EURO_SellingPrice | |||
sold = "-1" # 15 Product_QuantitySold | |||
left = "-1" # 16 Product_QuantityLeft | |||
shipFrom = "-1" # 17 Product_ShippedFrom | |||
shipTo = "-1" # 18 Product_ShippedTo | |||
image = "-1" # 19 Product_Image | |||
vendor_image = "-1" # 20 Vendor_Image | |||
# Populating the final variable (this should be a list with all fields scraped) | |||
row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate, | |||
BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image) | |||
# Sending the results | |||
return row | |||
# Parses a listing page: takes the page's HTML as a soup object and extracts the info it needs.
# The scraped fields are stored in separate lists, which are returned after being organized.
# @param: soup object of a listing page
# return: 'row' that contains the lists of fields scraped from the listing page
def abacus_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # *Total_Products (Should be Integer) | |||
mktName = "Abacus" # 0 *Marketplace_Name | |||
vendor = [] # 1 *Vendor y | |||
rating_vendor = [] # 2 Vendor_Rating | |||
success = [] # 3 Vendor_Successful_Transactions | |||
name = [] # 4 *Product_Name y | |||
CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this | |||
MS = [] # 6 Product_MS_Classification (Microsoft Security) dont worry about this | |||
category = [] # 7 Product_Category y | |||
describe = [] # 8 Product_Description | |||
views = [] # 9 Product_Number_Of_Views | |||
reviews = [] # 10 Product_Number_Of_Reviews | |||
rating_item = [] # 11 Product_Rating | |||
addDate = [] # 12 Product_AddDate | |||
BTC = [] # 13 Product_BTC_SellingPrice | |||
USD = [] # 14 Product_USD_SellingPrice y | |||
EURO = [] # 15 Product_EURO_SellingPrice | |||
sold = [] # 16 Product_QuantitySold | |||
qLeft = [] # 17 Product_QuantityLeft | |||
shipFrom = [] # 18 Product_ShippedFrom | |||
shipTo = [] # 19 Product_ShippedTo | |||
image = [] # 20 Product_Image | |||
image_vendor = [] # 21 Vendor_Image | |||
href = [] # 22 Product_Links | |||
cat = soup.find('span', {"class": "btn btn-sm btn-outline-dark w-100 active"}).text | |||
cat = cleanString(cat).strip() | |||
listing = soup.find('div', {"class": 'card-body text-black text-left bg-dark'}).findAll('div', {"class": 'card mb-4 border-danger rounded-0'}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
category.append(cat) | |||
# Adding the url to the list of urls | |||
link = a.find('a', {'class': "badge badge-danger w-100 text-white"}).get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product name | |||
product = a.find('div', {"class": 'marquee-parent'}).find('div', {"class": "marquee-child"}).text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
# Finding Product Image | |||
product_image = a.find('img') | |||
product_image = product_image.get('src') | |||
product_image = product_image.split('base64,')[-1] | |||
image.append(product_image) | |||
# Finding Prices | |||
price = a.findAll('a', {"class": "text-white"})[-1].text | |||
price = price.replace("$","") | |||
price = price.strip() | |||
USD.append(price) | |||
# Finding Item Rating | |||
temp = a.find('small', {"class": "text-white"}) | |||
rating = len(temp.findAll('i', {"class": "fas fa-star"})) | |||
half_stars = len(temp.findAll('i', {'class': "fas fa-star-half-alt"})) | |||
if half_stars > 0: | |||
rating += 0.5 | |||
rating_item.append(str(rating)) | |||
# Finding the Vendor | |||
vendor_name = a.find('a', {"class": 'badge badge-dark w-100 text-white my-1'}).text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
image_vendor.append("-1") | |||
# Searching for CVE and MS categories | |||
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if not cve: | |||
cveValue = "-1" | |||
else: | |||
cee = " " | |||
for idx in cve: | |||
cee += (idx) | |||
cee += " " | |||
cee = cee.replace(',', ' ') | |||
cee = cee.replace('\n', '') | |||
cveValue = cee | |||
CVE.append(cveValue) | |||
        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if not ms: | |||
MSValue = "-1" | |||
else: | |||
me = " " | |||
for im in ms: | |||
me += (im) | |||
me += " " | |||
me = me.replace(',', ' ') | |||
me = me.replace('\n', '') | |||
MSValue = me | |||
MS.append(MSValue) | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views, | |||
reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image, image_vendor) | |||
# Called by the crawler to get the description links on a listing page.
# @param: beautifulsoup object of a listing page
# return: list of description links found on the listing page
def abacus_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.findAll('div', {"class": "src-listing p-1 pb-0 flex flex-col justify-between w-full rounded-md border-solid border-[1px] !border-border !bg-white hover:!bg-hover hover:!border-abacus2 !mt-0"}) | |||
for a in listing: | |||
link = a.find('a', href=True).get('href') | |||
href.append(link) | |||
return href |
@@ -0,0 +1,277 @@
__author__ = 'DarkWeb' | |||
''' | |||
MGMGrand marketplace Crawler (Selenium) | |||
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support.ui import Select | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.MGMGrand.parser import mgm_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
counter = 1 | |||
baseURL = 'http://duysanjqxo4svh35yqkxxe5r54z2xc5tjf6r3ichxd3m2rwcgabf44ad.onion' | |||
def startCrawling(): | |||
mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closeDriver(driver) | |||
new_parse(mktName, baseURL, True) | |||
# Returns the name of the website | |||
def getMKTName(): | |||
name = 'MGMGrand' | |||
return name | |||
# Return the base link of the website | |||
def getFixedURL(): | |||
url = 'http://duysanjqxo4svh35yqkxxe5r54z2xc5tjf6r3ichxd3m2rwcgabf44ad.onion' | |||
return url | |||
# Closes Tor Browser | |||
def closeDriver(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver(): | |||
from MarketPlaces.Initialization.markets_mining import config | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 3) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
driver.maximize_window() | |||
return driver | |||
# The driver 'gets' the URL; if the site cannot be accessed, 'down' is returned
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
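# Logs into MGMGrand: fills in the credential form, then waits for the user to solve the captcha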
def login(driver): | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, '//*[@id="username"]'))) | |||
# entering username and password into input boxes | |||
usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]') | |||
# Username here | |||
usernameBox.send_keys('blabri') | |||
passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]') | |||
# Password here | |||
passwordBox.send_keys('fishowal') | |||
input("Press ENTER when captcha is solved and you're logged in") | |||
# Wait for the element to be visible | |||
WebDriverWait(driver, 100).until( | |||
EC.visibility_of_element_located((By.XPATH, '/html/body/div[2]/div[2]/a/span')) | |||
) | |||
# Find the element and click it | |||
temp = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/a/span') | |||
temp.click() | |||
def savePage(driver, page, url): | |||
cleanPage = cleanHTML(driver, page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
return | |||
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE | |||
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages") | |||
fileName = getNameFromURL(url) | |||
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
return fullPath | |||
def getMKTName() -> str: | |||
name = 'MGMGrand' | |||
return name | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if name == '': | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
def getInterestedLinks(): | |||
links = [] | |||
# Carding | |||
links.append('http://duysanjqxo4svh35yqkxxe5r54z2xc5tjf6r3ichxd3m2rwcgabf44ad.onion/category/c54f4d30-9060-11eb-be9c-630550815967') | |||
# # softwares and malwares | |||
# links.append('http://duysanjqxo4svh35yqkxxe5r54z2xc5tjf6r3ichxd3m2rwcgabf44ad.onion/category/6a211fa0-9062-11eb-b3bd-d7d946c69ce2') | |||
# # social engineering tutorial | |||
# links.append('http://duysanjqxo4svh35yqkxxe5r54z2xc5tjf6r3ichxd3m2rwcgabf44ad.onion/category/5596f690-9063-11eb-9ebb-6b44dbdbc454') | |||
# # misc services | |||
# links.append('http://duysanjqxo4svh35yqkxxe5r54z2xc5tjf6r3ichxd3m2rwcgabf44ad.onion/category/b212fe30-9063-11eb-a479-35581612f6c2') | |||
# # hacking tutorials | |||
# links.append('http://duysanjqxo4svh35yqkxxe5r54z2xc5tjf6r3ichxd3m2rwcgabf44ad.onion/category/2ed713f0-9063-11eb-bf9c-232fd3b49a98') | |||
return links | |||
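# Main crawl loop: saves each listing page, opens every product (description) page it links to, and follows pagination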
def crawlForum(driver): | |||
print("Crawling the MGM market") | |||
linksToCrawl = getInterestedLinks() | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
has_next_page = True | |||
count = 0 | |||
while has_next_page: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(driver, html, link) | |||
                items = productPages(html)
                for item in items:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver, driver.page_source, item) | |||
driver.back() | |||
                    # Testing: only visit the first product; comment out this break for a full crawl
                    break
                # Testing: stop after a few pages; uncomment to limit the crawl depth
                # if count == 4:
                #     break
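                # Pagination: the last <li> in the pager holds the next-page link when one exists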
try: | |||
                    li_tags = driver.find_element(by=By.CSS_SELECTOR, value='.pagination.mb-0').find_elements(by=By.TAG_NAME, value='li')
a_tag = li_tags[-1].find_element(by=By.TAG_NAME, value='a') | |||
if a_tag: | |||
try: | |||
link = a_tag.get_attribute('href') | |||
print(link) | |||
except: | |||
link = '' | |||
if link == "": | |||
raise NoSuchElementException | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
print("Crawling the MGMGrand market done.") | |||
# Returns 'True' if the link is a description (product) page link; may need to change for every website
def isDescriptionLink(url): | |||
if 'product' in url: | |||
return True | |||
return False | |||
# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url): | |||
if 'category' in url: | |||
return True | |||
return False | |||
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return mgm_links_parser(soup) | |||
def crawler(): | |||
startCrawling() |
@@ -0,0 +1,250 @@
__author__ = 'DarkWeb' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
# Parses a description page: takes the page's HTML as a soup object and extracts the info it needs.
# The scraped fields are organized into a single tuple and returned.
# @param: soup object of a description page
# return: 'row' tuple that holds every field scraped from the description page
def mgm_description_parser(soup): | |||
# Fields to be parsed | |||
vendor = "-1" # 0 *Vendor_Name | |||
success = "-1" # 1 Vendor_Successful_Transactions | |||
rating_vendor = "-1" # 2 Vendor_Rating | |||
name = "-1" # 3 *Product_Name | |||
describe = "-1" # 4 Product_Description | |||
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 6 Product_MS_Classification (Microsoft Security) | |||
category = "-1" # 7 Product_Category | |||
views = "-1" # 8 Product_Number_Of_Views | |||
reviews = "-1" # 9 Product_Number_Of_Reviews | |||
rating_item = "-1" # 10 Product_Rating | |||
addDate = "-1" # 11 Product_AddedDate | |||
BTC = "-1" # 12 Product_BTC_SellingPrice | |||
USD = "-1" # 13 Product_USD_SellingPrice | |||
EURO = "-1" # 14 Product_EURO_SellingPrice | |||
sold = "-1" # 15 Product_QuantitySold | |||
left = "-1" # 16 Product_QuantityLeft | |||
shipFrom = "-1" # 17 Product_ShippedFrom | |||
shipTo = "-1" # 18 Product_ShippedTo | |||
image = "-1" # 19 Product_Image | |||
vendor_image = "-1" # 20 Vendor_Image | |||
# Finding Product Name | |||
divmb = soup.find('div', {'class': "col-md-12 my-2"}) | |||
name = divmb.find('span', {'class': "btn btn-sm btn-outline-dark w-100 active rounded-0"}).text | |||
name = name.replace('\n', ' ') | |||
name = name.replace(",", "") | |||
name = name.strip() | |||
box = soup.find('div', {'class': "col-md-7"}).find('span') | |||
box = box.findAll('span', {'class': "btn btn-mgray btn-sm w-100 active border-danger"}) | |||
# Finding Vendor | |||
vendor = soup.find('a', {'class': "btn btn-sm btn-mgray my-1 w-100 text-white"}).get('href') | |||
vendor = vendor.split('otherParty=')[-1] | |||
vendor = cleanString(vendor).strip() | |||
# Finding Vendor Rating | |||
temp = box[1] | |||
rating_vendor = len(temp.findAll('i', {"class": "fas fa-star"})) | |||
half_stars = len(temp.findAll('i', {'class': "fas fa-star-half-alt"})) | |||
if half_stars > 0: | |||
rating_vendor += 0.5 | |||
# Finding Successful Transactions | |||
success = box[2].text | |||
success = cleanNumbers(success).strip() | |||
box2 = soup.find('div', {"class": "col-md-4 text-center"}).find('span', {"class": "text-left"}).findAll('span') | |||
# Finding USD | |||
USD = box2[0].text | |||
USD = USD.replace('\n', '') | |||
USD = USD.replace('$', '') | |||
USD = USD.strip() | |||
# Finding Vendor Image | |||
vendor_image = soup.find('img', {"class": 'img-fluid'}).get('src') | |||
vendor_image = vendor_image.split('base64,')[-1] | |||
# Finding the Product description | |||
temp = soup.find('div', {"class": 'row-md-12'}).find('div', {"class": 'col-md-4'}) | |||
cardbody = temp.find('textarea', {"class": 'disabled form-control form-control-sm w-100 bg-mgray text-white rounded-0 border-danger'}) | |||
describe = cleanString(cardbody.text).strip() | |||
# Finding Product Image | |||
image = soup.find('div', {"class": 'row-md-12'}).find('div', {"class": 'col-md-4 text-center'}).find('img') | |||
if image is not None: | |||
image = image.get('src') | |||
image = image.split('base64,')[-1] | |||
else: | |||
image = "-1" | |||
# Searching for CVE and MS categories | |||
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if cve: | |||
CVE = " " | |||
for idx in cve: | |||
CVE += (idx) | |||
CVE += " " | |||
CVE = CVE.replace(',', ' ') | |||
CVE = CVE.replace('\n', '') | |||
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if ms: | |||
MS = " " | |||
for im in ms: | |||
MS += (im) | |||
MS += " " | |||
MS = MS.replace(',', ' ') | |||
MS = MS.replace('\n', '') | |||
# Populating the final variable (this should be a list with all fields scraped) | |||
row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate, | |||
BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image) | |||
# Sending the results | |||
return row | |||
# Parses a listing page: takes the page's HTML as a soup object and extracts the info it needs.
# The scraped fields are stored in separate lists, which are returned after being organized.
# @param: soup object of a listing page
# return: 'row' that contains the lists of fields scraped from the listing page
def mgm_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # *Total_Products (Should be Integer) | |||
mktName = "Ares" # 0 *Marketplace_Name | |||
vendor = [] # 1 *Vendor y | |||
rating_vendor = [] # 2 Vendor_Rating | |||
success = [] # 3 Vendor_Successful_Transactions | |||
name = [] # 4 *Product_Name y | |||
CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this | |||
MS = [] # 6 Product_MS_Classification (Microsoft Security) dont worry about this | |||
category = [] # 7 Product_Category y | |||
describe = [] # 8 Product_Description | |||
views = [] # 9 Product_Number_Of_Views | |||
reviews = [] # 10 Product_Number_Of_Reviews | |||
rating_item = [] # 11 Product_Rating | |||
addDate = [] # 12 Product_AddDate | |||
BTC = [] # 13 Product_BTC_SellingPrice | |||
USD = [] # 14 Product_USD_SellingPrice y | |||
EURO = [] # 15 Product_EURO_SellingPrice | |||
sold = [] # 16 Product_QuantitySold | |||
qLeft = [] # 17 Product_QuantityLeft | |||
shipFrom = [] # 18 Product_ShippedFrom | |||
shipTo = [] # 19 Product_ShippedTo | |||
image = [] # 20 Product_Image | |||
image_vendor = [] # 21 Vendor_Image | |||
href = [] # 22 Product_Links | |||
cat = soup.find('span', {"class": "btn btn-sm btn-outline-dark w-100 active"}).text | |||
cat = cleanString(cat).strip() | |||
listing = soup.find('div', {"class": 'card-body text-black text-left bg-dark'}).findAll('div', {"class": 'card mb-4 border-danger rounded-0'}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
category.append(cat) | |||
# Adding the url to the list of urls | |||
link = a.find('a', {'class': "badge badge-danger w-100 text-white"}).get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product name | |||
product = a.find('div', {"class": 'marquee-parent'}).find('div', {"class": "marquee-child"}).text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
# Finding Product Image | |||
product_image = a.find('img') | |||
product_image = product_image.get('src') | |||
product_image = product_image.split('base64,')[-1] | |||
image.append(product_image) | |||
# Finding Prices | |||
price = a.findAll('a', {"class": "text-white"})[-1].text | |||
price = price.replace("$","") | |||
price = price.strip() | |||
USD.append(price) | |||
# Finding Item Rating | |||
temp = a.find('small', {"class": "text-white"}) | |||
rating = len(temp.findAll('i', {"class": "fas fa-star"})) | |||
half_stars = len(temp.findAll('i', {'class': "fas fa-star-half-alt"})) | |||
if half_stars > 0: | |||
rating += 0.5 | |||
rating_item.append(str(rating)) | |||
# Finding the Vendor | |||
vendor_name = a.find('a', {"class": 'badge badge-dark w-100 text-white my-1'}).text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
image_vendor.append("-1") | |||
# Searching for CVE and MS categories | |||
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if not cve: | |||
cveValue = "-1" | |||
else: | |||
cee = " " | |||
for idx in cve: | |||
cee += (idx) | |||
cee += " " | |||
cee = cee.replace(',', ' ') | |||
cee = cee.replace('\n', '') | |||
cveValue = cee | |||
CVE.append(cveValue) | |||
        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if not ms: | |||
MSValue = "-1" | |||
else: | |||
me = " " | |||
for im in ms: | |||
me += (im) | |||
me += " " | |||
me = me.replace(',', ' ') | |||
me = me.replace('\n', '') | |||
MSValue = me | |||
MS.append(MSValue) | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views, | |||
reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image, image_vendor) | |||
# Called by the crawler to get the description links on a listing page.
# @param: beautifulsoup object of a listing page
# return: list of description links found on the listing page
def mgm_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.find('div', {"class": "list-products columns-3"}).find_all('div', {'class':'product-item hover-shadow'}) | |||
for a in listing: | |||
bae = a.findAll('a', href=True) | |||
# Adding the url to the list of urls | |||
link = bae[0].get('href') | |||
href.append(link) | |||
return href |