
completed vortex marketplace

main
Helium 8 months ago
commit 00792f64d7
2 changed files with 567 additions and 0 deletions
  1. +279 -0  MarketPlaces/Vortex/crawler_selenium.py
  2. +288 -0  MarketPlaces/Vortex/parser.py

+279 -0  MarketPlaces/Vortex/crawler_selenium.py

@@ -0,0 +1,279 @@
__author__ = 'DarkWeb'
'''
Vortex Marketplace Crawler (Selenium)
'''
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Vortex.parser import vortex_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML
counter = 1
baseURL = 'http://mq7ozbnrqdjc6cof3yakegs44kmo6vl3ajcyzdeya3zjtmi65jtmwqid.onion/login'

def startCrawling():
    mktName = getMKTName()
    # driver = getAccess()
    #
    # if driver != 'down':
    #     try:
    #         login(driver)
    #         crawlForum(driver)
    #     except Exception as e:
    #         print(driver.current_url, e)
    #     closeDriver(driver)

    new_parse(mktName, baseURL, True)

# Returns the name of the website
def getMKTName():
    name = 'Vortex'
    return name


# Return the base link of the website
def getFixedURL():
    url = 'http://mq7ozbnrqdjc6cof3yakegs44kmo6vl3ajcyzdeya3zjtmi65jtmwqid.onion/login'
    return url

# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return

# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
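
# The network.proxy.* preferences above route all traffic through the local Tor SOCKS5
# proxy on 127.0.0.1:9150 (the Tor Browser default; a standalone tor daemon usually
# listens on 9050 instead), and socks_remote_dns keeps .onion name resolution inside Tor.
# The history, cookie, image, and JavaScript preferences reduce fingerprinting and page weight.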

# The driver 'gets' the url, attempting to access the site; if it can't, return 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'

def agreeToTerms(driver):
    try:
        agree_button = driver.find_element(by=By.XPATH, value='//input[@type="submit" and @name="agree" and @value="Yes, I agree"]')
        agree_button.click()
    except Exception as e:
        print('Problem with clicking agree button', e)

def login(driver):
    # input("Press ENTER when CAPTCHA is complete and login page has loaded\n")

    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="username"]')))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('darkcorn')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="pwd"]')
    # Password here
    passwordBox.send_keys('CoRNNNN$!1')

    input("Press ENTER when captcha is solved")

    try:
        agree_button = driver.find_element(by=By.NAME, value='login')
        agree_button.click()
    except Exception as e:
        print('Problem with clicking login button', e)

    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="main"]')))

def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return

def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath
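
# Path note: the Description and Listing sub-folders are joined with literal backslashes
# (r'\\'), so this layout assumes the scraper runs on Windows; on other platforms the
# same components would need to be passed to os.path.join as separate arguments.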

def getMKTName() -> str:
    name = 'Vortex'
    return name

def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name

def getInterestedLinks():
    links = []

    # security and hacking
    # links.append('http://mq7ozbnrqdjc6cof3yakegs44kmo6vl3ajcyzdeya3zjtmi65jtmwqid.onion/home?cat=Security+%26+Hacking')
    # fraud
    links.append('http://mq7ozbnrqdjc6cof3yakegs44kmo6vl3ajcyzdeya3zjtmi65jtmwqid.onion/home?cat=Fraud')
    # malware, nothing here for now
    # links.append('http://mq7ozbnrqdjc6cof3yakegs44kmo6vl3ajcyzdeya3zjtmi65jtmwqid.onion/home?cat=Cracked+softwares%26comma%3B+Botnets+%26+Malware')

    return links

def crawlForum(driver):
    print("Crawling the vortex market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0
            counter = 1

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                list = productPages(html)

                for item in list:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out
                    break

                # comment out
                if count == 1:
                    break

                try:
                    temp = driver.find_element(by=By.XPATH, value='//*[@id="main"]')
                    temp = temp.find_element(by=By.CSS_SELECTOR, value='section[aria-label="Page Navigation"]')
                    temp = temp.find_element(by=By.CLASS_NAME, value='pagination')
                    temp = temp.find_elements(by=By.TAG_NAME, value='li')
                    link = temp[counter].find_element(by=By.CSS_SELECTOR, value='a.page-link.text-dark').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                    counter += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the vortex market done.")
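
# Pagination note: each pass saves the listing page, visits every product link on it, and
# then looks for the next page inside section[aria-label="Page Navigation"], using
# 'counter' as an index into the pagination <li> items; when no further link is found a
# NoSuchElementException ends the loop. The two 'comment out' breaks above are debug
# limits that stop after the first product and the first listing page.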

# Returns 'True' if the link is a Topic (description) link; may need to change for every website
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns True if the link is a listingPage link; may need to change for every website
def isListingLink(url):
    if 'home?' in url:
        return True
    return False

def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return vortex_links_parser(soup)


def crawler():
    startCrawling()

+288 -0  MarketPlaces/Vortex/parser.py

@@ -0,0 +1,288 @@
__author__ = 'DarkWeb'
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
import re

# Parses a description page: takes the html of a description page as a soup object and extracts the info it needs.
# The scraped values are collected into one record that is returned after being organized.
# @param: soup object of a description page's html
# return: 'row' tuple that holds the info scraped from the description page
def vortex_description_parser(soup):

    # Fields to be parsed

    vendor = "-1"          # 0 *Vendor_Name y
    success = "-1"         # 1 Vendor_Successful_Transactions n
    rating_vendor = "-1"   # 2 Vendor_Rating y
    name = "-1"            # 3 *Product_Name y
    describe = "-1"        # 4 Product_Description y
    CVE = "-1"             # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"              # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"        # 7 Product_Category y
    views = "-1"           # 8 Product_Number_Of_Views y
    reviews = "-1"         # 9 Product_Number_Of_Reviews n
    rating_item = "-1"     # 10 Product_Rating n
    addDate = "-1"         # 11 Product_AddedDate y
    BTC = "-1"             # 12 Product_BTC_SellingPrice y
    USD = "-1"             # 13 Product_USD_SellingPrice y
    EURO = "-1"            # 14 Product_EURO_SellingPrice n
    sold = "-1"            # 15 Product_QuantitySold n
    left = "-1"            # 16 Product_QuantityLeft n
    shipFrom = "-1"        # 17 Product_ShippedFrom n
    shipTo = "-1"          # 18 Product_ShippedTo n
    image = "-1"           # 19 Product_Image n
    vendor_image = "-1"    # 20 Vendor_Image n

    temp2 = soup.find('div', {'class': 'col-auto font-weight-bold'})

    # Finding Vendor - check
    vendor = temp2.find('a').text
    vendor = cleanString(vendor).strip()

    # Finding Vendor Rating (bug in their system shows standard rating)
    try:
        rating_vendor = temp2.find('span', {'class': "badge badge-pill mr-2"}).text
        rating_vendor = rating_vendor.replace('VendorLvl:', '')
    except:
        rating_vendor = '-1'
    rating_vendor = cleanNumbers(rating_vendor).strip()

    # Finding Product Name - check
    name = soup.find('h3', {'class': "d-inline-block font-weight-bold"}).text
    name = cleanString(name).strip()

    # Finding Product description - check
    try:
        describe = soup.find('p', {'class': 'description pl-3 py-2 border rounded-3'}).text
        describe = cleanString(describe).strip()
    except:
        describe = '-1'
    # print(describe)

    # Finding category - check
    div_category = soup.find('ol', {'class': "breadcrumb breadcrumb-navbar py-0 px-2 px-md-4 m-0 ml-2 mx-auto justify-content-center h-auto d-flex bg-transparent"}).find_all('li')
    category = div_category[1].find('a').text
    category = cleanString(category).strip()

    # Product rating - check
    try:
        rating = temp2.find('span', {'class': 'text-success mr-2 py-1'}).text
        rating = re.sub(r'[()+%]', '', rating)
    except:
        rating = '-1'
    rating_item = cleanString(rating).strip()

    # Finding BTC and USD/GOLD
    div_price = soup.find('h5', {'class': "product-price"}).find('span', {'class': 'small product-price ml-2'}).text
    div_price = div_price.split('|')

    bit = div_price[0]
    if 'btc' in bit:
        bit = bit.replace('btc', '')
        bit = cleanString(bit).strip()
        BTC = bit

    usd = div_price[2]
    if 'usdt_tron' in usd:
        usd = usd.replace('usdt_tron', '')
        usd = cleanString(usd).strip()
        USD = usd

    left = soup.find('div', {'class': 'row mt-3'}).find('div', {'class': 'col-6 font-weight-bold'}).text
    left = cleanString(left).strip()

    try:
        image = soup.find('li', {'class': 'carousel__slide'}).find('img').get('src').split('base64,')[-1]
    except:
        image = '-1'

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
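
# The price parsing above assumes the product-price span looks roughly like
# "<amount> btc | <amount> <other currency> | <amount> usdt_tron" (hypothetical example),
# so after splitting on '|' index 0 carries the BTC amount and index 2 the USDT amount,
# which is reported here as the USD price.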

# Parses listing pages: takes the html of a listing page as a soup object and extracts the info it needs.
# The scraped values are stored in per-field lists, which are returned after being organized.
# @param: soup object of a listing page's html
# return: 'row' that contains a variety of lists, each holding one field for every product on the listing page
def vortex_listing_parser(soup):

    # Fields to be parsed
    nm = 0                  # *Total_Products (Should be Integer)
    mktName = "Vortex"      # 0 *Marketplace_Name y
    vendor = []             # 1 *Vendor y
    rating_vendor = []      # 2 Vendor_Rating y
    success = []            # 3 Vendor_Successful_Transactions n
    name = []               # 4 *Product_Name y
    CVE = []                # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this
    MS = []                 # 6 Product_MS_Classification (Microsoft Security) dont worry about this
    category = []           # 7 Product_Category y
    describe = []           # 8 Product_Description n
    views = []              # 9 Product_Number_Of_Views y
    reviews = []            # 10 Product_Number_Of_Reviews n
    rating_item = []        # 11 Product_Rating n
    addDate = []            # 12 Product_AddDate y
    BTC = []                # 13 Product_BTC_SellingPrice y
    USD = []                # 14 Product_USD_SellingPrice y
    EURO = []               # 15 Product_EURO_SellingPrice n
    sold = []               # 16 Product_QuantitySold n
    qLeft = []              # 17 Product_QuantityLeft n
    shipFrom = []           # 18 Product_ShippedFrom n
    shipTo = []             # 19 Product_ShippedTo n
    image = []              # 20 Product_Image n
    image_vendor = []       # 21 Vendor_Image n
    href = []               # 22 Product_Links y

    temp = soup.find('main', {'id': 'main'}).find('section', {'id': 'page_container'})
    listings = temp.findAll('div', {"class": "product-card col-sm-6 col-md-3 col-xl-4 mb-5"})

    cat = soup.find('section', {'class': 'row px-md-4 mx-0 mb-3'}).find('ol').find_all('li')
    cat = cat[1].find('a').text

    # Populating the Number of Products
    nm = len(listings)

    for listing in listings:
        listing = listing.find('div', {'class': 'product-details'})

        # Finding vendor name - checked
        vendor_name = listing.find('span', {'class': 'd-inline-block w-100 mb-1'}).find('a').text
        if 'ships from' in vendor_name:
            reg = re.compile(r"(.+?)'s shop")
            match = reg.match(vendor_name)
            if match:
                vendor_name = match.group(1)
            vendor_name = vendor_name.replace("'s shop", '')
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the vendor rating - checked
        vendor_level = listing.find('span', {'class': "badge badge-pill mr-2 font-weight-normal"}).text
        vendor_level = vendor_level.split(' ')
        vendor_level = cleanNumbers(vendor_level[-1])
        rating_vendor.append(vendor_level)

        # Finding the product name - checked
        product_name = listing.find('h4').find('a').text
        product_name = cleanString(product_name).strip()
        name.append(product_name)

        # Searching for CVE and MS categories
        cve = listing.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
                cee = cee.replace('\t', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = listing.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

        # Finding the category - check
        category_text = cleanString(cat).strip()
        category.append(category_text)

        # Finding the hrefs - check
        description_link = listing.find('h4').find('a')['href']
        href.append(description_link)

        # Finding the views - check
        views_text = '-1'
        views.append(views_text)

        # Finding the date added
        date = '-1'
        # date = datetime.strptime(date, "%d-%m-%Y")
        addDate.append(date)

        # EURO
        EURO.append('-1')

        # Finding the BTC and USD/GOLD
        # default to '-1' so a missing or unexpected price block never leaves these unbound
        btc_price = '-1'
        usd_price = '-1'
        try:
            money = listing.find('div', {"class": 'w-100 small product-price mb-1 text-right'}).text
            money = money.split('|')
            if 'btc' in money[0]:
                btc_price = money[0].replace('btc', '')
                btc_price = cleanString(btc_price).strip()
        except:
            btc_price = '-1'
        BTC.append(btc_price)

        try:
            if 'usd' in money[2]:
                usd_price = money[2].strip().replace('usdt_tron', '')
                usd_price = cleanString(usd_price).strip()
        except:
            usd_price = '-1'
        USD.append(usd_price)

        sold.append('-1')
        qLeft.append('-1')
        shipTo.append('-1')

        # Finding shipped-from, taken from the same vendor line as above
        shipf = listing.find('span', {'class': 'd-inline-block w-100 mb-1'}).find('a').text
        if 'ships from' in shipf:
            pattern = re.compile(r"ships from (.*)")
            match = pattern.search(shipf)
            if match:
                shipf = match.group(1)
            shipf = shipf.replace('ships from', '')
        else:
            shipf = '-1'
        shipf = cleanString(shipf).strip()
        shipFrom.append(shipf)

        try:
            temp = listing.find('p', {'class': 'w-100 mb-2'}).find('img').get('src').split('base64,')[-1]
        except:
            temp = '-1'
        image.append(temp)
        image_vendor.append('-1')

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
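
# Note: the breadcrumb category scraped once per page ('cat') is reused for every listing,
# and fields the listing page does not expose (views, dates, EURO, sold, quantity left,
# ship-to) are filled with '-1' so organizeProducts receives equally sized lists per product.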

# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def vortex_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listings = soup.find('main').findAll('div', {"class": "product-card col-sm-6 col-md-3 col-xl-4 mb-5"})

    for listing in listings:
        # Adding the url to the list of urls
        description_link = listing.find('h4').find('a')['href']
        href.append(description_link)

    return href
