This module is based on the CALSysLab project.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 

285 lines
11 KiB

__author__ = 'cern'
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
def BlackPyramid_description_parser(soup):
    """Parse a BlackPyramid product description page.

    Walks the HTML of a single product page and extracts every field the
    pipeline knows about; fields that are not present on this marketplace
    stay at the sentinel value "-1".

    @param soup: BeautifulSoup object of a description page's HTML
    @return: 'row' tuple of 23 scraped fields, in the fixed order the
             downstream code expects
    """
    # Fields to be parsed; "-1" marks a field not found / not applicable
    name = "-1"       # 0  Product_Name
    describe = "-1"   # 1  Product_Description
    lastSeen = "-1"   # 2  Product_LastViewDate
    rules = "-1"      # 3  NOT USED ...
    CVE = "-1"        # 4  Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"         # 5  Product_MS_Classification (Microsoft Security)
    review = "-1"     # 6  Product_Number_Of_Reviews
    category = "-1"   # 7  Product_Category
    shipFrom = "-1"   # 8  Product_ShippedFrom
    shipTo = "-1"     # 9  Product_ShippedTo
    left = "-1"       # 10 Product_QuantityLeft
    escrow = "-1"     # 11 Vendor_Warranty
    terms = "-1"      # 12 Vendor_TermsAndConditions
    vendor = "-1"     # 13 Vendor_Name
    sold = "-1"       # 14 Product_QuantitySold
    addDate = "-1"    # 15 Product_AddedDate
    available = "-1"  # 16 NOT USED ...
    endDate = "-1"    # 17 NOT USED ...
    BTC = "-1"        # 18 Product_BTC_SellingPrice
    USD = "-1"        # 19 Product_USD_SellingPrice
    rating = "-1"     # 20 Vendor_Rating
    success = "-1"    # 21 Vendor_Successful_Transactions
    EURO = "-1"       # 22 Product_EURO_SellingPrice

    # Finding Product Name (text node right after the first <span> in the panel)
    name = soup.find('div', {'class': 'panel39002'}).find('span').next_sibling
    name = name.replace('\n', ' ').replace(",", "").strip()

    # Product description (second matching div holds the description text)
    describe = soup.findAll('div', {'class': 'fer048953'})[1].text
    describe = describe.replace('\n', ' ').replace(",", "").strip()

    # Finding Vendor — third whitespace-separated token, minus a trailing char
    vendor = soup.find('div', {'class': 'bold03905 vstat364'}).text
    vendor = vendor.split(" ")[2][:-1]
    vendor = vendor.replace('\n', ' ').replace(",", "").strip()

    # Finding Vendor Rating (first 3 chars of the numeric rating, e.g. "4.5")
    rating_span = soup.find('span', {'class': 'to3098503t'}).find_next_sibling('span')
    rating_num = rating_span.find('b').text
    if rating_num != 'N/A':
        rating = rating_num[0:3]

    # Finding Successful Transactions (second <li> of the vendor stats list)
    success_container = soup.find('ul', {'class': 'ul3o00953'}).findAll('li')[1]
    success = success_container.find('div').text
    success = success.replace('"', '').replace("\n", " ").replace(",", "").strip()

    # Finding Prices — the USD amount is the token after the first comma
    USD_text = soup.find('li', {'class': 'vul2994 vghul995'}).find('div').text
    USD = USD_text.split(',')[1]
    USD = USD.replace('\n', ' ').replace(",", "").strip()

    container = soup.find('ul', {'class': 'bic03095'})

    # Finding Number Sold (text node following the <b> label)
    sold_container = container.find('li')
    sold_div = sold_container.findAll('div')[2]
    sold = sold_div.find('b').next_sibling
    sold = sold.replace('"', '').replace("\n", " ").replace(",", "").strip()

    # Finding the Product Quantity Available
    left_container = container.find('li')
    left_div = left_container.findAll('div')[3]
    left = left_div.find('b').next_sibling
    left = left.replace('"', '').replace("\n", " ").replace(",", "").strip()

    # Finding number of reviews = positive + neutral + negative counters
    positive = soup.find('span', {'class': 'ar04999324'}).text
    neutral = soup.find('span', {'class': 'ti9400005 can39953'}).text
    negative = soup.find('span', {'class': 'ti9400005 ti90088 can39953'}).text
    review = int(positive) + int(neutral) + int(negative)

    # Searching for CVE and MS categories anywhere in the page text.
    # Raw strings so '\d' is a regex digit class, not a (deprecated) string escape.
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
        CVE = CVE.replace(',', ' ')
        CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
        MS = MS.replace(',', ' ')
        MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor,
           sold, addDate, available, endDate, BTC, USD, rating, success, EURO)

    # Sending the results
    return row
def BlackPyramid_listing_parser(soup):
    """Parse a BlackPyramid listing (search-results) page.

    Extracts one entry per product card into parallel lists, then hands
    everything to organizeProducts() for final assembly.

    @param soup: BeautifulSoup object of a listing page's HTML
    @return: value of organizeProducts(...) built from the parsed lists
    """
    # Fields to be parsed
    nm = 0                          # *  Total_Products (Should be Integer)
    mktName = "BlackPyramid"        # 0  Marketplace_Name
    name = []                       # 1  Product_Name
    CVE = []                        # 2  Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                         # 3  Product_MS_Classification (Microsoft Security)
    category = []                   # 4  Product_Category
    describe = []                   # 5  Product_Description
    escrow = []                     # 6  Vendor_Warranty
    views = []                      # 7  Product_Number_Of_Views
    reviews = []                    # 8  Product_Number_Of_Reviews
    addDate = []                    # 9  Product_AddDate
    lastSeen = []                   # 10 Product_LastViewDate
    BTC = []                        # 11 Product_BTC_SellingPrice
    USD = []                        # 12 Product_USD_SellingPrice
    EURO = []                       # 13 Product_EURO_SellingPrice
    sold = []                       # 14 Product_QuantitySold
    qLeft = []                      # 15 Product_QuantityLeft
    shipFrom = []                   # 16 Product_ShippedFrom
    shipTo = []                     # 17 Product_ShippedTo
    rating_item = []                # 18 Product_Rating
    vendor = []                     # 19 Vendor
    rating = []                     # 20 Vendor_Rating
    success = []                    # 21 Vendor_Successful_Transactions
    href = []                       # 23 Product_Links (Urls)

    listing = soup.findAll('article', {"class": "product"})

    # Some listing pages have an additional article section which is blank
    if not listing[-1].findAll('a', href=True):
        listing = listing[:-1]

    # Populating the Number of Products
    nm = len(listing)

    for card in listing:
        bae = card.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[2].get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product
        product = bae[3].text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # Finding description
        # 'recursive=False' only searches direct children
        desc = card.findChildren('div', recursive=False)[0]
        desc = desc.findAll('div', recursive=False)[3].text
        desc = desc.replace('\n', ' ')
        desc = desc.replace(",", "")
        desc = desc.strip()
        describe.append(desc)

        # Finding Vendor Name (second whitespace-separated token of the span)
        vendor_name = bae[4].find('span').text
        vendor_name = vendor_name.split(' ')[1]
        vendor_name = vendor_name.replace('\n', ' ')
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the Category
        cat = card.findAll('div', recursive=False)[0].findAll('div', recursive=False)[1].find('span').text
        cat = cat.replace("\n", "")
        cat = cat.replace(",", "")
        cat = cat.strip()
        category.append(cat)

        bae = card.findAll('div', recursive=False)[1].findAll('div', recursive=False)[1]

        # Finding amount left (strip the 'x' quantity marker)
        left = bae.findAll('div', recursive=False)[1].text
        left = left.replace("x", "")
        left = left.replace('\n', ' ')
        left = left.replace(",", "")
        left = left.strip()
        qLeft.append(left)

        # Finding amount sold
        qsold = bae.findAll('div', recursive=False)[2].text
        qsold = qsold.replace('\n', ' ')
        qsold = qsold.replace("x", "")
        qsold = qsold.replace(",", "")
        qsold = qsold.strip()
        sold.append(qsold)

        # Searching for CVE and MS categories.
        # Raw strings so '\d' is a regex digit class, not a string escape.
        cve = card.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
            cee = cee.replace(',', ' ')
            cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = card.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
            me = me.replace(',', ' ')
            me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped).
    # NOTE(review): the 13th argument is the per-product rating list
    # (rating_item); it was previously `rating` (the vendor rating list),
    # passed twice — both lists are unpopulated here, so output is unchanged.
    return organizeProducts(mktName, nm, vendor, rating, success, name, CVE, MS, category, describe, views, reviews,
                            rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
def BlackPyramid_links_parser(soup):
    """Collect description-page links from a listing page (called by the crawler).

    @param soup: BeautifulSoup object of a listing page's HTML
    @return: list of href strings, one per product card that has a link
    """
    # Returning all links that should be visited by the Crawler
    href = []
    listing = soup.findAll('article', {"class": "product"})

    for item in listing:
        # Reuse the anchor found once instead of searching the card twice;
        # blank placeholder cards have no anchor and are skipped.
        container = item.find('a', {"class": "ah39063"})
        if container:
            href.append(container['href'])

    return href