
completed atlas marketplace

main
Helium 8 months ago
parent commit 8b0993d43a
2 changed files with 556 additions and 0 deletions
  1. +283 -0  MarketPlaces/Atlas/crawler_selenium.py
  2. +273 -0  MarketPlaces/Atlas/parser.py

+283 -0  MarketPlaces/Atlas/crawler_selenium.py

@@ -0,0 +1,283 @@
__author__ = 'DarkWeb'
'''
Atlas Marketplace Crawler (Selenium)
'''
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Atlas.parser import atlas_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion'


def startCrawling():
    mktName = getMKTName()

    # driver = getAccess()
    #
    # if driver != 'down':
    #     try:
    #         login(driver)
    #         crawlForum(driver)
    #     except Exception as e:
    #         print(driver.current_url, e)
    #     closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
def getMKTName():
    name = 'Atlas'
    return name


# Returns the base link of the website
def getFixedURL():
    url = 'http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver


# The driver 'gets' the url and attempts to access the site; returns 'down' if it can't
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


def agreeToTerms(driver):
    try:
        agree_button = driver.find_element(by=By.XPATH, value='//input[@type="submit" and @name="agree" and @value="Yes, I agree"]')
        agree_button.click()
    except Exception as e:
        print('Problem with clicking agree button', e)


def login(driver):
    # input("Press ENTER when CAPTCHA is complete and login page has loaded\n")
    input("Press ENTER when captcha is solved")

    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="username"]')))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('atldark')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('a1T2l3dark')

    input("Press ENTER when captcha is solved and you're logged in")

    # try:
    #     agree_button = driver.find_element(by=By.XPATH, value='/html/body/main/div/form/div[6]/button')
    #     agree_button.click()
    # except Exception as e:
    #     print('Problem with clicking login button', e)


def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath


def getMKTName() -> str:
    name = 'Atlas'
    return name


def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


def getInterestedLinks():
    links = []

    # hacking
    # links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/hacking')
    # fraud
    links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/fraud')
    # software
    links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/software')
    # services
    # links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/services')

    return links


def crawlForum(driver):
    print("Crawling the Atlas market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                list = productPages(html)
                for item in list:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out
                    break

                # comment out
                # if count == 1:
                #     break

                try:
                    temp = driver.find_element(by=By.XPATH, value='/html/body/div[2]/form/section/div/div/div[2]/div[2]')
                    temp = temp.find_elements(by=By.TAG_NAME, value='a')
                    a_tag = temp[-1]

                    if a_tag:
                        temp = a_tag.find_element(by=By.CLASS_NAME, value='double-arrow-right')
                        if temp:
                            link = a_tag.get_attribute('href')
                        else:
                            link = ''

                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Atlas market done.")


# Returns 'True' if the link is a description (product) link, may need to change for every website
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns True if the link is a listingPage link, may need to change for every website
def isListingLink(url):
    if '/c/' in url:
        return True
    return False


def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return atlas_links_parser(soup)


def crawler():
    startCrawling()

+273 -0  MarketPlaces/Atlas/parser.py

@@ -0,0 +1,273 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

import re


# Parses a description page: takes the soup object of a product description page and extracts the info it needs,
# organizing the scraped fields before returning them
# @param: soup object looking at html page of description page
# return: 'row', a tuple holding every field scraped from the description page
def atlas_description_parser(soup):

    # Fields to be parsed

    vendor = "-1"  # 0 *Vendor_Name y
    success = "-1"  # 1 Vendor_Successful_Transactions n
    rating_vendor = "-1"  # 2 Vendor_Rating y
    name = "-1"  # 3 *Product_Name y
    describe = "-1"  # 4 Product_Description y
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"  # 7 Product_Category y
    views = "-1"  # 8 Product_Number_Of_Views y
    reviews = "-1"  # 9 Product_Number_Of_Reviews n
    rating_item = "-1"  # 10 Product_Rating n
    addDate = "-1"  # 11 Product_AddedDate y
    BTC = "-1"  # 12 Product_BTC_SellingPrice y
    USD = "-1"  # 13 Product_USD_SellingPrice y
    EURO = "-1"  # 14 Product_EURO_SellingPrice n
    sold = "-1"  # 15 Product_QuantitySold n
    left = "-1"  # 16 Product_QuantityLeft n
    shipFrom = "-1"  # 17 Product_ShippedFrom n
    shipTo = "-1"  # 18 Product_ShippedTo n
    image = "-1"  # 19 Product_Image n
    vendor_image = "-1"  # 20 Vendor_Image n

    # Finding Vendor
    vendor = soup.find('div', {'class': 'flex items-center space-x-3'})
    vendor = vendor.find('div', {'class': 'flex items-center space-x-3'}).find('a').text
    vendor = cleanString(vendor).strip()

    # sold
    try:
        sell = soup.find('p', {'class': 'pl-3 text-sm font-semibold text-slate-700 dark:text-slate-400'}).text
        reg = r'product sold (.+)'
        sell = re.search(reg, sell)
        if sell:
            sell = sell.group(1)
            sell = sell.replace('product sold ', '').replace('time', '').replace('s', '')
            sell = cleanString(sell.strip())
    except:
        sell = '-1'
    sold = sell

    # successful transaction
    suc = soup.find('div', {'class': 'mt-4 grid w-full grid-cols-6 gap-4 rounded border p-3 shadow-sm dark:border-slate-700'}).find_all('div')
    suc = suc[2].find_all('p')
    suc = suc[1].text
    success = cleanString(suc.strip())

    # Finding Vendor Rating (bug in their system shows standard rating)
    rating_vendor = '-1'
    rating_vendor = cleanNumbers(rating_vendor).strip()

    # Finding Product Name
    name = soup.find('h2').text
    name = cleanString(name).strip()

    # Finding Product description
    try:
        describe = soup.find('div', {'class': 'prose mb-12 mt-5 break-words dark:prose-invert'}).text
        describe = cleanString(describe).strip()
    except:
        describe = '-1'

    # Finding category
    try:
        div_category = soup.find('ol', {'class': "leading-node flex items-center gap-1 text-sm font-medium text-gray-600 dark:text-gray-300"}).find_all('li', class_=lambda x: x is None)
        category = div_category[1].find('a').text
    except:
        category = '-1'
    category = cleanString(category).strip()

    # Product rating - check
    rating = '-1'
    rating_item = cleanString(rating).strip()

    # Finding BTC and USD/GOLD
    BTC = '-1'

    usd = soup.find('h1', {'class': 'text-2xl font-bold leading-none tracking-tight text-slate-800 dark:text-slate-300'}).text
    if '$' in usd:
        usd = usd.replace('$', '')
    usd = cleanString(usd).strip()
    USD = usd

    try:
        image = soup.find('div', {'class': 'w-full flex-1 flex-shrink-0'}).find('img').get('src').split('base64,')[-1]
    except:
        image = '-1'

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row


# Parses a listing page: takes the soup object of a listing page and extracts the info it needs for every product,
# storing the scraped fields in per-field lists that are returned after being organized
# @param: soup object looking at html page of listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def atlas_listing_parser(soup):

    # Fields to be parsed
    nm = 0  # *Total_Products (Should be Integer)
    mktName = "Atlas"  # 0 *Marketplace_Name y
    vendor = []  # 1 *Vendor y
    rating_vendor = []  # 2 Vendor_Rating y
    success = []  # 3 Vendor_Successful_Transactions n
    name = []  # 4 *Product_Name y
    CVE = []  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this
    MS = []  # 6 Product_MS_Classification (Microsoft Security) dont worry about this
    category = []  # 7 Product_Category y
    describe = []  # 8 Product_Description n
    views = []  # 9 Product_Number_Of_Views y
    reviews = []  # 10 Product_Number_Of_Reviews n
    rating_item = []  # 11 Product_Rating n
    addDate = []  # 12 Product_AddDate y
    BTC = []  # 13 Product_BTC_SellingPrice y
    USD = []  # 14 Product_USD_SellingPrice y
    EURO = []  # 15 Product_EURO_SellingPrice n
    sold = []  # 16 Product_QuantitySold n
    qLeft = []  # 17 Product_QuantityLeft n
    shipFrom = []  # 18 Product_ShippedFrom n
    shipTo = []  # 19 Product_ShippedTo n
    image = []  # 20 Product_Image n
    image_vendor = []  # 21 Vendor_Image n
    href = []  # 22 Product_Links y

    listings = soup.find('div', {'class': 'grid h-fit grid-cols-1 gap-x-8 gap-y-10 lg:grid-cols-4 md:grid-cols-3'}).findAll('div', class_=lambda x: x is None)

    temp = soup.find('h1', {'class': 'text-4xl font-bold tracking-tight text-gray-900 dark:text-slate-200'}).text
    reg = r'results in (.+)'
    cat = re.search(reg, temp)
    if cat:
        cat = cat.group(1)
        cat = cat.replace('results in ', '')
    else:
        cat = '-1'

    # Populating the Number of Products
    nm = len(listings)

    for listing in listings:

        # Finding vendor name
        vendor_name = listing.find('p', {'class': 'text-sm font-medium leading-none dark:text-slate-100'}).text
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the vendor rating
        vendor_level = listing.find('p', {'class': "flex space-x-1 text-xs text-gray-700 dark:text-slate-500"}).text
        vendor_level = vendor_level.strip().split(' ')
        vendor_level = cleanNumbers(vendor_level[-1])
        rating_vendor.append(vendor_level)

        # Finding the product name
        product_name = listing.find('p', {'class': 'my-1 line-clamp-2 text-sm text-slate-700 group-hover:underline dark:text-slate-300'}).text
        product_name = cleanString(product_name).strip()
        name.append(product_name)

        # Searching for CVE and MS categories
        cve = listing.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
                cee = cee.replace('\t', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = listing.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

        # Finding the category
        category_text = cleanString(cat).strip()
        category.append(category_text)

        # Finding the hrefs
        description_link = listing.find('a', {'class': 'group relative block'})['href']
        href.append(description_link)

        # Finding the views - check
        views_text = '-1'
        views.append(views_text)

        # Finding the date added
        date = '-1'
        # date = datetime.strptime(date, "%d-%m-%Y")
        addDate.append(date)

        # EURO
        EURO.append('-1')

        # Finding the BTC and USD/GOLD
        btc_price = '-1'
        BTC.append(btc_price)

        try:
            money = listing.find('p', {'class': 'mt-2 text-lg font-bold tracking-wider text-slate-900 dark:text-slate-200'}).text
            if '$' in money:
                usd_price = money.strip().replace('$', '')
                usd_price = cleanString(usd_price).strip()
            else:
                usd_price = '-1'
        except:
            usd_price = '-1'
        USD.append(usd_price)

        sold.append('-1')
        qLeft.append('-1')
        shipTo.append('-1')
        shipf = '-1'
        shipFrom.append(shipf)

        try:
            temp = listing.find('div', {'class': 'overflow-hidden rounded-lg'}).find('img').get('src').split('base64,')[-1]
        except:
            temp = '-1'
        image.append(temp)

        image_vendor.append('-1')

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)


# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def atlas_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listings = soup.find('div', {'class': 'grid h-fit grid-cols-1 gap-x-8 gap-y-10 lg:grid-cols-4 md:grid-cols-3'}).findAll('div')

    for listing in listings:
        # Adding the url to the list of urls
        try:
            description_link = listing.find('a', {'class': 'group relative block'}).get('href')
            href.append(description_link)
        except:
            pass

    return href
