# Import auxiliary functions used to clean and convert scraped data
from MarketPlaces.Utilities.utilities import *

# Import the regular-expression module used by the CVE/MS searches below
import re

# Import BeautifulSoup to traverse the HTML tree
from bs4 import BeautifulSoup


# Parses description pages: takes the HTML of a description page as a soup object
# and parses it for the info it needs.
# The scraped fields are collected and returned after being organized.
# @param: soup object of the HTML of a description page
# @return: 'row' tuple that holds every field of info scraped from the description page
def darkbazar_description_parser(soup):
    # Fields to be parsed

    vendor = "-1"                       # 0 *Vendor_Name
    success = "-1"                      # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"                # 2 Vendor_Rating
    name = "-1"                         # 3 *Product_Name
    describe = "-1"                     # 4 Product_Description
    CVE = "-1"                          # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                           # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"                     # 7 Product_Category
    views = "-1"                        # 8 Product_Number_Of_Views
    reviews = "-1"                      # 9 Product_Number_Of_Reviews
    rating_item = "-1"                  # 10 Product_Rating
    addDate = "-1"                      # 11 Product_AddedDate
    BTC = "-1"                          # 12 Product_BTC_SellingPrice
    USD = "-1"                          # 13 Product_USD_SellingPrice
    EURO = "-1"                         # 14 Product_EURO_SellingPrice
    sold = "-1"                         # 15 Product_QuantitySold
    left = "-1"                         # 16 Product_QuantityLeft
    shipFrom = "-1"                     # 17 Product_ShippedFrom
    shipTo = "-1"                       # 18 Product_ShippedTo
    escrow = "-1"                       # Vendor_Warranty (escrow badge; returned in 'row' below)

    # Finding Product Name
    divmb = soup.findAll('div', {'class': "mb-1"})

    name = divmb[0].text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    # Finding Vendor
    vendor = divmb[1].find('a').text.strip()

    # Finding Vendor Rating
    rating_vendor = soup.find('div', {'class': ""}).text
    rating_vendor = rating_vendor.replace("Vendor's Review : ", "")
    rating_vendor = rating_vendor.strip()

    # Finding Successful Transactions
    success = divmb[3].text
    success = success.replace("Level:", "")
    success = success.strip()

    # Finding Prices
    USD = soup.find('div', {'class': "h3 text-primary"}).text.strip()

    # Finding Escrow
    escrow = divmb[5].find('span', {'class': "badge badge-danger"}).text.strip()

    # Finding the Product Category
    pmb = soup.findAll('p', {'class': "mb-1"})

    category = pmb[-1].text
    category = category.replace("Category: ", "").strip()

    # Finding the Product Quantity Available
    left = divmb[-1].text
    left = left.split(",", 1)[1]
    left = left.replace("in stock", "")
    left = left.strip()

    # Finding Number Sold
    sold = divmb[-1].text
    sold = sold.split(",", 1)[0]
    sold = sold.replace("sold", "")
    sold = sold.strip()

    # Finding Shipment Information (Origin)
    shipFrom = pmb[0].text
    shipFrom = shipFrom.replace("Ships from: ", "").strip()

    # Finding Shipment Information (Destination)
    shipTo = pmb[1].text
    shipTo = shipTo.replace("Ships to: ", "").strip()

    # Finding the Product description
    cardbody = soup.findAll('div', {'class': "card-body"})
    describe = cardbody[1].text.strip()

    # Finding the Number of Product Reviews
    reviews = soup.find('div', {'class': "product-rating"}).text
    reviews = reviews.replace("(", "")
    reviews = reviews.replace(" review)", "")
    reviews = reviews.strip()

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += idx
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')

    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += im
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a tuple with all fields scraped)
    row = (name, describe, CVE, MS, reviews, category, shipFrom, shipTo, left, escrow, vendor,
           sold, addDate, BTC, USD, rating_vendor, success, EURO)

    # Sending the results
    return row
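

# A minimal usage sketch (an illustration, not part of the crawler pipeline): parse a
# locally saved description page. The file name and the 'html.parser' backend are
# assumptions for demonstration only.
#
# with open("darkbazar_description.html", "r", encoding="utf-8") as f:
#     soup = BeautifulSoup(f.read(), "html.parser")
# row = darkbazar_description_parser(soup)
# print(row[0])   # product name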


# Parses listing pages: takes the HTML of a listing page as a soup object
# and parses it for the info it needs.
# The scraped values are stored in per-field lists, which are returned after being organized.
# @param: soup object of the HTML of a listing page
# @return: 'row' that contains a variety of lists, each holding info from the listing page
def darkbazar_listing_parser(soup):
    # Fields to be parsed
    nm = 0                             # Total_Products (Should be Integer)
    mktName = "DarkBazar"              # 0 Marketplace_Name
    name = []                          # 1 Product_Name
    CVE = []                           # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                            # 3 Product_MS_Classification (Microsoft Security)
    category = []                      # 4 Product_Category
    describe = []                      # 5 Product_Description
    escrow = []                        # 6 Vendor_Warranty
    views = []                         # 7 Product_Number_Of_Views
    reviews = []                       # 8 Product_Number_Of_Reviews
    addDate = []                       # 9 Product_AddDate
    lastSeen = []                      # 10 Product_LastViewDate
    BTC = []                           # 11 Product_BTC_SellingPrice
    USD = []                           # 12 Product_USD_SellingPrice
    EURO = []                          # 13 Product_EURO_SellingPrice
    sold = []                          # 14 Product_QuantitySold
    qLeft = []                         # 15 Product_QuantityLeft
    shipFrom = []                      # 16 Product_ShippedFrom
    shipTo = []                        # 17 Product_ShippedTo
    vendor = []                        # 18 Vendor
    rating = []                        # 19 Vendor_Rating
    success = []                       # 20 Vendor_Successful_Transactions
    href = []                          # 23 Product_Links (Urls)

    listing = soup.findAll('div', {"id": "itembox"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)
        lb = a.findAll('div', {"id": "littlebox"})

        # Adding the url to the list of urls
        link = bae[0].get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product
        product = lb[1].find('a').text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # Finding Prices
        price = lb[-1].find('div', {"class": "mb-1"}).text
        price = price.replace("$", "")
        price = price.strip()
        USD.append(price)

        # Finding the Vendor
        vendor_name = lb[-1].find("a").text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the Category
        cat = lb[-1].find("span").text
        cat = cat.replace("class:", "")
        cat = cat.strip()
        category.append(cat)

        # Finding Number Sold and Quantity Left
        span = lb[1].findAll("span")
        num = span[-1].text
        num = num.replace("Sold:", "")
        num = num.strip()
        sold.append(num)

        quant = span[1].text
        quant = quant.replace("stock:", "")
        quant = quant.strip()
        qLeft.append(quant)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += idx
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += im
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
                            BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
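

# A standalone sketch of the CVE/MS normalization used in both parsers above: every
# regex match on the page is flattened into one space-separated string. The sample
# text is made up for illustration.
#
# import re
# sample = "Exploit pack for CVE-2021-1234,\nalso covers MS17-010"
# hits = re.findall(r'CVE-\d{4}-\d{4}', sample)
# flat = " " + " ".join(hits) + " "          # -> " CVE-2021-1234 "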


# Called by the crawler to get the description links on a listing page
# @param: BeautifulSoup object of the HTML of a listing page
# @return: list of description links from the listing page
def darkbazar_links_parser(soup):
    # Returning all links that should be visited by the Crawler

    href = []
    listing = soup.findAll('div', {"id": "itembox"})

    # for a in listing:
    #     bae = a.find('a', {"class": "text-info"}, href=True)
    #     link = bae['href']
    #     href.append(link)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        link = cleanLink(link)
        href.append(link)

    return href
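

# A hedged end-to-end sketch of how a crawler might chain these parsers: collect the
# description links from a saved listing page, then fetch and parse each description
# page with darkbazar_description_parser. The file name is a hypothetical placeholder.
if __name__ == "__main__":
    with open("darkbazar_listing.html", "r", encoding="utf-8") as f:
        listing_soup = BeautifulSoup(f.read(), "html.parser")

    for url in darkbazar_links_parser(listing_soup):
        # The real crawler would fetch 'url' here and parse the returned HTML
        print(url)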