__author__ = 'DarkWeb'

# Here, we are importing re to search for CVE and MS patterns with regular expressions
import re

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# parses description pages: takes the HTML of a description page as a soup object and extracts the info it needs
# the info is stored in individual fields, which are organized and returned together
# @param: soup object looking at the html page of a description page
# return: 'row' that contains a variety of fields, each holding info scraped from the description page
def sonanza_description_parser(soup):

    # Fields to be parsed
vendor = "-1" # 0 *Vendor_Name
|
|
success = "-1" # 1 Vendor_Successful_Transactions
|
|
rating_vendor = "-1" # 2 Vendor_Rating
|
|
name = "-1" # 3 *Product_Name
|
|
describe = "-1" # 4 Product_Description
|
|
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
|
|
MS = "-1" # 6 Product_MS_Classification (Microsoft Security)
|
|
category = "-1" # 7 Product_Category
|
|
views = "-1" # 8 Product_Number_Of_Views
|
|
reviews = "-1" # 9 Product_Number_Of_Reviews
|
|
rating_item = "-1" # 10 Product_Rating
|
|
addDate = "-1" # 11 Product_AddedDate
|
|
BTC = "-1" # 12 Product_BTC_SellingPrice
|
|
USD = "-1" # 13 Product_USD_SellingPrice
|
|
EURO = "-1" # 14 Product_EURO_SellingPrice
|
|
sold = "-1" # 15 Product_QuantitySold
|
|
left = "-1" # 16 Product_QuantityLeft
|
|
shipFrom = "-1" # 17 Product_ShippedFrom
|
|
shipTo = "-1" # 18 Product_ShippedTo
|
|
image = "-1" # 19 Product_Image
|
|
vendor_image = "-1" # 20 Vendor_Image
|
|
|
|
    listing = soup.find('div', {"id": "article_page"})

    # Finding the Product
    name = listing.find('div', {"class": "row box"}).text
    name = cleanString(name).strip()

    # Finding Product Image
    product_image = listing.find('img')
    product_image = product_image.get('src')
    product_image = product_image.split('base64,')[-1]
    image = product_image

    table = listing.find('div', {"class": "col-md-5"})

    # Finding Prices
    USD = table.find('span', {"class": "pr"}).text
    USD = USD.replace("$", "").strip()

    BTC = table.find_all('span', {"class": "pr1"})[1].text
    BTC = BTC.replace("BTC", "").strip()
    # Finding Category and Vendor Level
    rows = table.find_all('p', {"class": "mb-0"})
    for row in rows:
        temp = row.text
        if "CATEGORY" in temp:
            category = temp.replace("CATEGORY :", "")
            category = cleanString(category).strip()
        elif "VENDOR LEVEL" in temp:
            rating_vendor = temp.replace("VENDOR LEVEL :", "")
            rating_vendor = cleanString(rating_vendor).strip()

    # Finding Vendor, Ship To and Quantity Sold
    rows = listing.find_all('p', {"class": "mb-1"})
    for row in rows:
        temp = row.text
        if "VENDOR" in temp:
            vendor = temp.replace("VENDOR :", "")
            vendor = cleanString(vendor).strip()
        elif "SHIPS TO" in temp:
            shipTo = temp.replace("SHIPS TO :", "")
            shipTo = cleanString(shipTo).strip()
        elif "SOLD" in temp:
            sold = cleanNumbers(temp).strip()
    # Finding Product Description
    describe = listing.find('pre').text
    describe = cleanString(describe).strip()
    # Searching for CVE and MS categories (e.g. "CVE-2021-1234", "MS15-034")
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')

    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')
    # Populating the final variable (this should be a tuple with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
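# A minimal usage sketch for this parser (not part of the scraper flow): it assumes the
# crawler has already saved a Sonanza description page to disk; the file path below is
# hypothetical.
#
#   with open('sonanza_description.html', 'r', encoding='utf-8') as f:
#       soup = BeautifulSoup(f.read(), 'html.parser')
#   row = sonanza_description_parser(soup)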
# parses listing pages: takes the HTML of a listing page as a soup object and extracts the info it needs
# the info is stored in lists (one per field), which are organized and returned together
# @param: soup object looking at the html page of a listing page
# return: 'row' that contains a variety of lists, each holding info scraped from the listing page
def sonanza_listing_parser(soup):

    # Fields to be parsed
    nm = 0                              # *Total_Products (Should be Integer)
    mktName = "Sonanza"                 # 0 *Marketplace_Name
    vendor = []                         # 1 *Vendor y
    rating_vendor = []                  # 2 Vendor_Rating
    success = []                        # 3 Vendor_Successful_Transactions
    name = []                           # 4 *Product_Name y
    CVE = []                            # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []                             # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []                       # 7 Product_Category y
    describe = []                       # 8 Product_Description
    views = []                          # 9 Product_Number_Of_Views
    reviews = []                        # 10 Product_Number_Of_Reviews
    rating_item = []                    # 11 Product_Rating
    addDate = []                        # 12 Product_AddDate
    BTC = []                            # 13 Product_BTC_SellingPrice
    USD = []                            # 14 Product_USD_SellingPrice y
    EURO = []                           # 15 Product_EURO_SellingPrice
    sold = []                           # 16 Product_QuantitySold
    qLeft = []                          # 17 Product_QuantityLeft
    shipFrom = []                       # 18 Product_ShippedFrom
    shipTo = []                         # 19 Product_ShippedTo
    image = []                          # 20 Product_Image
    image_vendor = []                   # 21 Vendor_Image
    href = []                           # 22 Product_Links
    listings = soup.findAll('div', {"class": "col-sm-4 col-md-3"})

    # Populating the Number of Products
    nm = len(listings)

    for listing in listings:

        # Adding the url to the list of urls
        bae = listing.find('a', href=True)
        link = bae.get('href')
        href.append(link)
        # Finding Product Image
        product_image = listing.find('img')
        product_image = product_image.get('src')
        product_image = product_image.split('base64,')[-1]
        image.append(product_image)

        # Finding the Product
        product = listing.find('h5', {"class": "art_title"}).text
        product = cleanString(product)
        name.append(product.strip())

        # Finding Prices
        price = listing.find('span', {"class": "priceP"}).text
        price = price.replace("$", "")
        USD.append(price.strip())
        # Finding Category and Vendor
        rows = listing.find_all('p', {"class": "mb-0 card-text"})
        for row in rows:
            temp = row.text
            if "CATEGORY" in temp:
                cat = temp.replace("CATEGORY :", "")
                cat = cleanString(cat)
                category.append(cat.strip())
            elif "VENDOR" in temp:
                vendor_name = temp.replace("VENDOR :", "")
                vendor_name = cleanString(vendor_name)
                vendor.append(vendor_name.strip())

        # Finding Vendor Rating
        rating = listing.find('span', {"class": "badge badge-info"}).text
        rating = rating.replace("VENDOR LEVEL :", "")
        rating = cleanString(rating)
        rating_vendor.append(rating.strip())
        # Searching for CVE and MS categories
        cve = listing.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = listing.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)
    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href,
                            image, image_vendor)
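# A minimal usage sketch, assuming 'soup' wraps a saved Sonanza listing page (the crawler
# is expected to build the soup object; see also the __main__ example at the bottom of this file):
#
#   products = sonanza_listing_parser(soup)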
# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def sonanza_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []
    listings = soup.findAll('div', {"class": "col-sm-4 col-md-3"})

    for listing in listings:
        a = listing.find('a', href=True)

        # Adding the url to the list of urls
        link = a.get('href')
        href.append(link)

    return href
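

# A minimal sketch of how a crawler might drive these parsers, assuming listing pages have
# already been saved to disk. The file path below is hypothetical and only meant for illustration.
if __name__ == '__main__':
    with open('sonanza_listing_page.html', 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')

    # description links the crawler would visit next
    links = sonanza_links_parser(listing_soup)
    print(f"{len(links)} description links found")

    # organized rows for every product advertised on the listing page
    products = sonanza_listing_parser(listing_soup)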