
Added crawler and parser for Black Pyramid Marketplace

main
chris 1 year ago
parent
commit
746ec6ddd9
2 changed files with 391 additions and 252 deletions
  1. +178 -124 MarketPlaces/BlackPyramid/crawler_selenium.py
  2. +213 -128 MarketPlaces/BlackPyramid/parser.py

+178 -124 MarketPlaces/BlackPyramid/crawler_selenium.py View File

@@ -1,9 +1,7 @@
__author__ = 'Helium'
__author__ = 'cern'
'''
BlackPyramid Forum Crawler (Selenium)
cannot be used because no links are used
kept in case issues are solved
BlackPyramid Market Crawler (Selenium)
'''
from selenium import webdriver
@@ -11,64 +9,101 @@ from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver import ActionChains
import selenium.webdriver.support.ui as uiClasses
from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.BlackPyramid.parser import blackpyramid_links_parser
from MarketPlaces.BlackPyramid.parser import BlackPyramid_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML
import traceback
config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1
baseURL = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/login/'
baseURL = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/login/?login=1'
# Opens Tor Browser, crawls the website, then parses, then closes tor
#acts like the main method for the crawler; another function at the end of this file calls it later
# Opens Tor Browser, crawls the website
def startCrawling():
mktName = getMKTName()
# Opening tor beforehand gives "Tor exited during startup error"
# opentor()
marketName = getMarketName()
driver = getAccess()
# Wait for website to load
input("Press ENTER when website has loaded")
if driver != 'down':
try:
login(driver)
crawlForum(driver)
except Exception as e:
print(driver.current_url, e)
closeDriver(driver)
closetor(driver)
new_parse(marketName, baseURL, False)
new_parse(mktName, baseURL, True)
# Opens Tor Browser
def opentor():
global pid
print("Connecting Tor...")
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
pid = pro.pid
time.sleep(7.5)
input('Tor Connected. Press ENTER to continue\n')
return
# Login
def login(driver):
# entering username and password into input boxes
usernameBox = driver.find_element(by=By.XPATH, value="//input[@name='username_login']")
# Username here
usernameBox.send_keys('ChipotleSteakBurrito')
passwordBox = driver.find_element(by=By.XPATH, value="//input[@name='password_login']")
# Password here
passwordBox.send_keys('BlackBeans')
input("Press ENTER when CAPTCHA is completed\n")
# wait for the listing page to show up (this XPath may need to change based on the seed url)
#WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
# (By.XPATH, '/html/body/div[2]/div[3]/div[3]/div[1]/div[3]/nav/ul/li[10]/a')))
# Returns the name of the website
#return: name of site in string type
def getMKTName():
def getMarketName():
name = 'BlackPyramid'
return name
# Return the base link of the website
#return: url of base site in string type
# Return the link of the website
def getFixedURL():
url = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/'
url = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/login/?login=1'
return url
# Closes Tor Browser
#@param: current selenium driver
def closeDriver(driver):
def closetor(driver):
# global pid
# os.system("taskkill /pid " + str(pro.pid))
# os.system("taskkill /t /f /im tor.exe")
print('Closing Tor...')
driver.close()
driver.quit()
time.sleep(3)
return
@@ -76,8 +111,6 @@ def closeDriver(driver):
# Creates FireFox 'driver' and configure its 'Profile'
# to use Tor proxy and socket
def createFFDriver():
from MarketPlaces.Initialization.markets_mining import config
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
@@ -106,16 +139,13 @@ def createFFDriver():
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
driver.maximize_window()
return driver
#the driver 'gets' the url, attempting to access the site; if it can't, it returns 'down'
#return: the selenium driver or the string 'down'
def getAccess():
url = getFixedURL()
driver = createFFDriver()
input('Tor Connected. Press ENTER to continue\n')
try:
driver.get(url)
return driver
@@ -124,33 +154,9 @@ def getAccess():
return 'down'
# Manual captcha solver: waits for a specific element so that the whole page loads, finds the input box, gets a screenshot of the captcha,
# then allows for manual solving of the captcha in the terminal
#@param: current selenium web driver
def login(driver):
# wait for login page
login_link = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[3]/div/main/div/div/div/div[2]/div/div/div/section[1]/input[1]')
login_link.click() # open tab with url
# entering username and password into input boxes
usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
# Username here
usernameBox.send_keys('ChipotleSteakBurrito')
passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
# Password here
passwordBox.send_keys('BlackBeans')
input("Press ENTER when CAPTCHA is completed\n")
# wait for the listing page to show up (this XPath may need to change based on the seed url)
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, '/html/body/div[2]/form/nav/nav/ul/li[2]/div/a/span[1]')))
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
cleanPage = cleanHTML(driver, page)
# Saves the crawled html page
def savePage(page, url):
cleanPage = cleanHTML(page)
filePath = getFullPathName(url)
os.makedirs(os.path.dirname(filePath), exist_ok=True)
open(filePath, 'wb').write(cleanPage.encode('utf-8'))
@@ -158,100 +164,148 @@ def savePage(driver, page, url):
# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
global counter
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE
fileName = getNameFromURL(url)
if isDescriptionLink(url):
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
if (os.path.exists(r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html')):
fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + "(" + str(counter) + ")" + '.html'
else:
fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html'
else:
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
if (os.path.exists(r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html')):
fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + "(" + str(counter) + ")" + '.html'
else:
fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html'
return fullPath
# Creates the file name from the passed URL, giving it a distinct name if it can't be made unique after cleaning
#@param: raw url as crawler crawls through every site
# Creates the file name from passed URL
def getNameFromURL(url):
global counter
name = ''.join(e for e in url if e.isalnum())
if (name == ''):
if name == '':
name = str(counter)
counter = counter + 1
return name
def goToPage(driver, page):
# hover over digital -> hacking tools
a = ActionChains(driver)
# hover
digitalB = driver.find_element(By.XPATH, "//li[@class='dig940']/div/a")
time.sleep(1)
a.move_to_element(digitalB).perform()
print(digitalB)
# delay for website to register hover
time.sleep(10)
# click
#xpath = "//input[@value='" + page + "']"
xpath = "//input[@name='" + page + "']"
link = driver.find_element(By.XPATH, xpath)
time.sleep(1)
a.move_to_element(link).click().perform()
print(link)
# wait for website to load
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, '/html/body/center/div[4]/div[1]/div[3]/article/div[1]/h1/a')))
# returns a list of urls of interest; the crawler runs through this list
#in this example, there are a couple of categories that threads fall under, such as
# Guides and Tutorials, Digital Products, and Software and Malware
#as you can see, they are categories of products
def getInterestedLinks():
links = []
# Hacking Guides
links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
# # Exploits
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
# # botnets/malware
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
# # fraud software
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
# # Other Tools
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
# # Services
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
# h11 -> Hacking Tools
# g3 -> Guides, Hacking
# se3 -> Services, Hacking
# f6 -> Fraud software
links = ['h11','g3','se3','f6']
return links
# gets links of interest to crawl through and iterates through the list; each link is clicked and crawled through
#topic and description pages are crawled through here, and both types of pages are saved
#@param: selenium driver
def crawlForum(driver):
print("Crawling the BlackPyramid market")
linksToCrawl = getInterestedLinks()
#linksToCrawl = getInterestedLinks()
#pages = ["Hacking Tools"]
pages = getInterestedLinks()
#visited = set(linksToCrawl)
initialTime = time.time()
i = 0
while i < len(linksToCrawl):
link = linksToCrawl[i]
print('Crawling :', link)
count = 0
for listing in pages:
#link = linksToCrawl[i]
print('Crawling :', listing)
try:
has_next_page = True
count = 0
try:
goToPage(driver, listing)
except:
print("Try block 1")
driver.refresh()
time.sleep(5)
html = driver.page_source
savePage(html, listing)
has_next_page = True
currentPage = 1
numberOfPages = 1
while has_next_page:
try:
clicker = driver.find_element(by=By.XPATH, value='/html/body/div[2]/form/nav/nav/ul/li[2]/div/a')
clicker.click() # open tab with url
driver.get(link)
except:
driver.refresh()
html = driver.page_source
savePage(driver, html, link)
# get a list of urls for each listing
list = productPages(html)
for item in list:
itemURL = urlparse.urljoin(baseURL, str(item))
try:
driver.get(itemURL)
except:
print("Try block 2")
driver.refresh()
savePage(driver, driver.page_source, item)
driver.back()
savePage(driver.page_source, item)
# can't use the back button in BlackPyramid
# driver.back()
# comment out
break
# break
# comment out
if count == 1:
break
# if count == 1:
# count = 0
# break
# go to next page of market
try:
clicker = driver.find_element(by=By.XPATH, value=
'/html/body/center/div[4]/div/div[3]/div[23]/div[2]/input[1]')
if clicker == "":
goToPage(driver, listing)
nav = driver.find_element(by=By.XPATH, value="//input[@name='next_page']")
if not nav.is_enabled():
raise NoSuchElementException
try:
# block obscuring element
#element = driver.find_element(by=By.XPATH, value="//input[@class='tei39950693']")
#driver.execute_script("arguments[0].style.visibility='hidden'", element)
# select next page
pgnum = uiClasses.Select(driver.find_element(by=By.XPATH, value="//select[@name='pageination']"))
print("pg options:", pgnum.options)
pgnum.select_by_index(currentPage)
numberOfPages = len(pgnum.options)
# click button
pgbutton = driver.find_element(by=By.XPATH, value="//input[@value='go to page']")
pgbutton.click()
except Exception as e:
print(e)
raise NoSuchElementException
time.sleep(10)
html = driver.page_source
savePage(html, listing)
currentPage += 1
if currentPage > numberOfPages:
raise NoSuchElementException
count += 1
@@ -259,39 +313,39 @@ def crawlForum(driver):
has_next_page = False
except Exception as e:
print(link, e)
traceback.print_exc()
print(listing, e)
i += 1
print("Crawling the BlackPyramid market done.")
# finalTime = time.time()
# print finalTime - initialTime
input("Crawling Dark Pyramid done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a description link
#@param: url of any url crawled
#return: true if it is a description page, false if not
# Returns 'True' if the link is a Topic link
def isDescriptionLink(url):
if 'products' in url:
if 'product' in url:
return True
return False
# Returns True if the link is a listingPage link
#@param: url of any url crawled
#return: true if it is a Listing page, false if not
def isListingLink(url):
if 'search' in url:
if 'category=' in url:
return True
return False
# calling the parser to define the links; the html is the page source of a link from the interested link list
#@param: link from interested link list, i.e. getInterestedLinks()
#return: list of description links that should be crawled through
# calling the parser to define the links
def productPages(html):
soup = BeautifulSoup(html, "html.parser")
return blackpyramid_links_parser(soup)
return BlackPyramid_links_parser(soup)
def crawler():
startCrawling()
# print("Crawling and Parsing BlackPyramid .... DONE!")
# print("Crawling and Parsing BestCardingWorld .... DONE!")
if __name__ == '__main__':
startCrawling()

+213 -128 MarketPlaces/BlackPyramid/parser.py View File

@@ -1,4 +1,4 @@
__author__ = 'Helium'
__author__ = 'cern'
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
@@ -11,7 +11,7 @@ from bs4 import BeautifulSoup
#stores info it needs in different lists, these lists are returned after being organized
#@param: soup object looking at html page of description page
#return: 'row' that contains a variety of lists that each hold info on the description page
def darkfox_description_parser(soup):
def BlackPyramid_description_parser(soup):
# Fields to be parsed
@@ -40,82 +40,71 @@ def darkfox_description_parser(soup):
EURO = "-1" # 22 Product_EURO_SellingPrice
# Finding Product Name
name = soup.find('h1').text
name = soup.find('div', {'class': 'panel39002'}).find('span').next_sibling
name = name.replace('\n', ' ')
name = name.replace(",", "")
name = name.strip()
# product description
describe = soup.findAll('div', {'class': 'fer048953'})[1].text
describe = describe.replace('\n', ' ')
describe = describe.replace(",", "")
describe = describe.strip()
# Finding Vendor
vendor = soup.find('h3').find('a').text.strip()
vendor = soup.find('div', {'class': 'bold03905 vstat364'}).text
vendor = vendor.split(" ")
vendor = vendor[2][:-1]
vendor = vendor.replace('\n', ' ')
vendor = vendor.replace(",", "")
vendor = vendor.strip()
# Finding Vendor Rating
rating = soup.find('span', {'class': "tag is-dark"}).text.strip()
rating_span = soup.find('span', {'class': 'to3098503t'}).find_next_sibling('span')
rating_num = rating_span.find('b').text
if rating_num != 'N/A':
rating = rating_num[0:3]
# Finding Successful Transactions
success = soup.find('h3').text
success = success.replace("Vendor: ", "")
success = success.replace(vendor, "")
success = success.replace("(", "")
success = success.replace(")", "")
success_container = soup.find('ul', {'class': 'ul3o00953'}).findAll('li')[1]
success = success_container.find('div').text
success = success.replace('"', '')
success = success.replace("\n", " ")
success = success.replace(",", "")
success = success.strip()
bae = soup.find('div', {'class': "box"}).find_all('ul')
# Finding Prices
USD = bae[1].find('strong').text.strip()
li = bae[2].find_all('li')
USD_text = soup.find('li', {'class': 'vul2994 vghul995'}).find('div').text
USD = USD_text.split(',')[1]
USD = USD.replace('\n', ' ')
USD = USD.replace(",", "")
USD = USD.strip()
# Finding Escrow
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip()
# Finding the Product Category
category = li[1].find('span', {'class': "tag is-dark"}).text.strip()
# Finding the Product Quantity Available
left = li[3].find('span', {'class': "tag is-dark"}).text.strip()
container = soup.find('ul', {'class': 'bic03095'})
# Finding Number Sold
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip()
li = bae[3].find_all('li')
# Finding Shipment Information (Origin)
if "Ships from:" in li[-2].text:
shipFrom = li[-2].text
shipFrom = shipFrom.replace("Ships from: ", "")
# shipFrom = shipFrom.replace(",", "")
shipFrom = shipFrom.strip()
# Finding Shipment Information (Destination)
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text
shipTo = shipTo.replace("Ships to: ", "")
shipTo = shipTo.strip()
if "certain countries" in shipTo:
countries = ""
tags = li[-1].find_all('span', {'class': "tag"})
for tag in tags:
country = tag.text.strip()
countries += country + ", "
shipTo = countries.strip(", ")
# Finding the Product description
describe = soup.find('div', {'class': "pre-line"}).text
describe = describe.replace("\n", " ")
describe = describe.strip()
sold_container = container.find('li')
sold_div = sold_container.findAll('div')[2]
sold = sold_div.find('b').next_sibling
sold = sold.replace('"', '')
sold = sold.replace("\n", " ")
sold = sold.replace(",", "")
sold = sold.strip()
'''# Finding the Number of Product Reviews
tag = soup.findAll(text=re.compile('Reviews'))
for index in tag:
reviews = index
par = reviews.find('(')
if par >=0:
reviews = reviews.replace("Reviews (","")
reviews = reviews.replace(")","")
reviews = reviews.split(",")
review = str(abs(int(reviews[0])) + abs(int(reviews[1])))
else :
review = "-1"'''
# Finding the Product Quantity Available
left_container = container.find('li')
left_div = left_container.findAll('div')[3]
left = left_div.find('b').next_sibling
left = left.replace('"', '')
left = left.replace("\n", " ")
left = left.replace(",", "")
left = left.strip()
# Finding number of reviews
positive = soup.find('span', {'class': 'ar04999324'}).text
neutral = soup.find('span', {'class': 'ti9400005 can39953'}).text
negative = soup.find('span', {'class': 'ti9400005 ti90088 can39953'}).text
review = int(positive) + int(neutral) + int(negative)
# Searching for CVE and MS categories
cve = soup.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
@@ -147,11 +136,11 @@ def darkfox_description_parser(soup):
#stores info it needs in different lists, these lists are returned after being organized
#@param: soup object looking at html page of listing page
#return: 'row' that contains a variety of lists that each hold info on the listing page
def darkfox_listing_parser(soup):
def BlackPyramid_listing_parser(soup):
# Fields to be parsed
nm = 0 # Total_Products (Should be Integer)
mktName = "DarkFox" # 0 Marketplace_Name
mktName = "BlackPyramid" # 0 Marketplace_Name
name = [] # 1 Product_Name
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = [] # 3 Product_MS_Classification (Microsoft Security)
@@ -169,80 +158,82 @@ def darkfox_listing_parser(soup):
qLeft =[] # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom
shipTo = [] # 17 Product_ShippedTo
vendor = [] # 18 Vendor
rating = [] # 19 Vendor_Rating
success = [] # 20 Vendor_Successful_Transactions
rating_item = [] # 18 Product_Rating
vendor = [] # 19 Vendor
rating = [] # 20 Vendor_Rating
success = [] # 21 Vendor_Successful_Transactions
href = [] # 23 Product_Links (Urls)
listing = soup.findAll('div', {"class": "card"})
listing = soup.findAll('article', {"class": "product"})
# Some listing pages have an additional article section which is blank
if not listing[-1].findAll('a', href=True):
listing = listing[:-1]
# Populating the Number of Products
nm = len(listing)
for a in listing:
bae = a.findAll('a', href=True)
for card in listing:
bae = card.findAll('a', href=True)
# Adding the url to the list of urls
link = bae[0].get('href')
link = bae[2].get('href')
link = cleanLink(link)
href.append(link)
# Finding the Product
product = bae[1].find('p').text
product = bae[3].text
product = product.replace('\n', ' ')
product = product.replace(",", "")
product = product.replace("...", "")
product = product.strip()
name.append(product)
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div')
if len(bae) >= 5:
# Finding Prices
price = bae[0].text
ud = price.replace(" USD", " ")
# u = ud.replace("$","")
u = ud.replace(",", "")
u = u.strip()
USD.append(u)
# bc = (prc[1]).strip(' BTC')
# BTC.append(bc)
# Finding the Vendor
vendor_name = bae[1].find('a').text
vendor_name = vendor_name.replace(",", "")
vendor_name = vendor_name.strip()
vendor.append(vendor_name)
# Finding the Category
cat = bae[2].find('small').text
cat = cat.replace("Category: ", "")
cat = cat.replace(",", "")
cat = cat.strip()
category.append(cat)
# Finding Number Sold and Quantity Left
num = bae[3].text
num = num.replace("Sold: ", "")
num = num.strip()
sold.append(num)
quant = bae[4].find('small').text
quant = quant.replace("In stock: ", "")
quant = quant.strip()
qLeft.append(quant)
# Finding Successful Transactions
freq = bae[1].text
freq = freq.replace(vendor_name, "")
freq = re.sub(r'Vendor Level \d+', "", freq)
freq = freq.replace("(", "")
freq = freq.replace(")", "")
freq = freq.strip()
success.append(freq)
# Finding description
# 'recursive=False' only searches direct children
desc = card.findChildren('div', recursive=False)[0]
desc = desc.findAll('div', recursive=False)[3].text
desc = desc.replace('\n', ' ')
desc = desc.replace(",", "")
desc = desc.strip()
describe.append(desc)
# Finding Vendor Name
vendor_name = bae[4].find('span').text
vendor_name = vendor_name.split(' ')[1]
vendor_name = vendor_name.replace('\n', ' ')
vendor_name = vendor_name.replace(",", "")
vendor_name = vendor_name.strip()
vendor.append(vendor_name)
# Finding the Category
cat = card.findAll('div', recursive=False)[0].findAll('div', recursive=False)[1].find('span').text
cat = cat.replace("\n", "")
cat = cat.replace(",", "")
cat = cat.strip()
category.append(cat)
bae = card.findAll('div', recursive=False)[1].findAll('div', recursive=False)[1]
# Finding amount left
left = bae.findAll('div', recursive=False)[1].text
left = left.replace("x", "")
left = left.replace('\n', ' ')
left = left.replace(",", "")
left = left.strip()
qLeft.append(left)
# Finding amount sold
qsold = bae.findAll('div', recursive=False)[2].text
qsold = qsold.replace('\n', ' ')
qsold = qsold.replace("x", "")
qsold = qsold.replace(",", "")
qsold = qsold.strip()
sold.append(qsold)
# Searching for CVE and MS categories
cve = a.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
cve = card.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
if not cve:
cveValue="-1"
else:
@@ -255,7 +246,7 @@ def darkfox_listing_parser(soup):
cveValue=cee
CVE.append(cveValue)
ms = a.findAll(text=re.compile('MS\d{2}-\d{3}'))
ms = card.findAll(text=re.compile('MS\d{2}-\d{3}'))
if not ms:
MSValue="-1"
else:
@@ -269,23 +260,117 @@ def darkfox_listing_parser(soup):
MS.append(MSValue)
# Populate the final variable (this should be a list with all fields scraped)
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
return organizeProducts(mktName, nm, vendor, rating, success, name, CVE, MS, category, describe, views, reviews, rating,
addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
#called by the crawler to get description links on a listing page
#@param: beautifulsoup object that is using the correct html page (listing page)
#return: list of description links from a listing page
def blackpyramid_links_parser(soup):
def BlackPyramid_links_parser(soup):
# Returning all links that should be visited by the Crawler
href = []
listing = soup.findAll('article', {"class": "product"})
for div in listing:
link = div.find('a', {"class": "ah39063"})['href']
href.append(link)
return href
for item in listing:
container = item.find('a', {"class": "ah39063"})
if container:
link = item.find('a', {"class": "ah39063"})['href']
href.append(link)
return href
import glob
import os
import codecs
import shutil
import traceback
if __name__ == '__main__':
nError = 0
marketPlace = 'BlackPyramid'
lines = [] # listing pages
lns = [] # description pages
detPage = {}
'''
# reading description pages
count = 0
for fileDescription in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\10222023\\Description", '*.html')):
count += 1
lns.append(fileDescription)
# if count > 5:
# break
for index, line2 in enumerate(lns):
print("Reading description folder of '" + marketPlace + "', file '" + os.path.basename(line2) + "', index= " + str(index + 1) + " ... " + str(len(lns)))
try:
html = codecs.open(line2.strip('\n'), encoding='utf8')
soup = BeautifulSoup(html, "html.parser")
html.close()
except:
try:
html = open(line2.strip('\n'))
soup = BeautifulSoup(html, "html.parser")
html.close()
except:
nError += 1
print("There was a problem to read the file " + line2 + " in the Description section!")
# if createLog:
# logFile.write(str(nError) + ". There was a problem to read the file " + line2 + " in the Description section.\n")
continue
try:
print(BlackPyramid_description_parser(soup))
except:
traceback.print_exc()
print("There was a problem to parse the file " + line2 + " in the Description section!")
'''
# reading listing pages
count = 0
for fileListing in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\10222023\\Listing", '*.html')):
count += 1
lines.append(fileListing)
# if count > 1:
# break
for index, line1 in enumerate(lines):
print("Reading listing folder of '" + marketPlace + "', file '" + os.path.basename(line1) + "', index= " + str(
index + 1) + " ... " + str(len(lines)))
readError = False
try:
html = codecs.open(line1.strip('\n'), encoding='utf8')
soup = BeautifulSoup(html, "html.parser")
html.close()
except:
try:
html = open(line1.strip('\n'))
soup = BeautifulSoup(html, "html.parser")
html.close()
except:
print("There was a problem to read the file " + line1 + " in the Listing section!")
readError = True
if not readError:
parseError = False
try:
print(BlackPyramid_listing_parser(soup))
except:
traceback.print_exc()
print("There was a problem to parse the file " + line1 + " in the listing section!")
parseError = True
print("DONE")
