__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# parses description pages: takes the HTML of a description page as a soup object and extracts the info it needs
# stores that info in individual fields, which are returned after being organized
# @param: soup object of a description page's HTML
# @return: 'row' tuple that holds the info scraped from the description page
def torbay_description_parser(soup):

    # Fields to be parsed
    vendor = "-1"                       # 0 *Vendor_Name
    success = "-1"                      # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"                # 2 Vendor_Rating
    name = "-1"                         # 3 *Product_Name
    describe = "-1"                     # 4 Product_Description
    CVE = "-1"                          # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                           # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"                     # 7 Product_Category
    views = "-1"                        # 8 Product_Number_Of_Views
    reviews = "-1"                      # 9 Product_Number_Of_Reviews
    rating_item = "-1"                  # 10 Product_Rating
    addDate = "-1"                      # 11 Product_AddedDate
    BTC = "-1"                          # 12 Product_BTC_SellingPrice
    USD = "-1"                          # 13 Product_USD_SellingPrice
    EURO = "-1"                         # 14 Product_EURO_SellingPrice
    sold = "-1"                         # 15 Product_QuantitySold
    left = "-1"                         # 16 Product_QuantityLeft
    shipFrom = "-1"                     # 17 Product_ShippedFrom
    shipTo = "-1"                       # 18 Product_ShippedTo
    image = "-1"                        # 19 Product_Image
    vendor_image = "-1"                 # 20 Vendor_Image

    # Finding Product Name
    try:
        product_name = soup.find('div', {'class': 'product-information'}).find('h1').text
        name = cleanString(product_name.strip())
    except:
        # fall back to the profile header if the product-information block is missing
        product_name = soup.find('div', {'class': 'profile-info'}).find('h2').text
        name = cleanString(product_name.strip())

    # Finding Vendor Name
    vendor_name = soup.find('div', {"class": "profile-info"}).find('h2').text
    vendor = cleanString(vendor_name.strip())

    # Finding Vendor Image (stored as the base64 payload of the embedded image)
    vendor_image = soup.find('div', {'class': 'avatar'}).find('img')
    vendor_image = vendor_image.get('src')
    vendor_image = vendor_image.split('base64,')[-1]

    # Finding Prices
    USD = soup.find('div', {'class': "total-price"}).find('span').text.strip()

    # Finding the Product Category
    cat = soup.find('div', {'class': "profile-info"}).find('p').text
    category = cleanString(cat.strip())

    # Finding the Product description
    try:
        describe = soup.find('div', {'class': "info"}).find('p').text
        if "\n" in describe:
            describe = describe.replace("\n", " ")
            describe = describe.replace("\r", " ")
        describe = cleanString(describe.strip())
    except:
        # print("product desc")
        describe = soup.find('div', {'class': 'info'}).text
        describe = cleanString(describe.strip())

    # Finding Product Image (stored as the base64 payload of the embedded image)
    image = soup.find('div', {'class': 'image text-center'}).find('img')
    image = image.get('src')
    image = image.split('base64,')[-1]

    # Populating the final variable (this should be a tuple with all the fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
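

# A minimal offline usage sketch (not part of the scraper flow): the path below is
# hypothetical and simply assumes a TorBay description page was saved to disk earlier.
def _example_description_parse(html_path='torbay_description.html'):
    with open(html_path, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f.read(), 'html.parser')
    # row is the tuple built by torbay_description_parser (vendor first, vendor_image last)
    row = torbay_description_parser(soup)
    print(row)
    return row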


# parses listing pages: takes the HTML of a listing page as a soup object and extracts the info it needs
# stores the info it needs in different lists; these lists are returned after being organized
# @param: soup object of a listing page's HTML
# @return: 'row' that contains a variety of lists, each holding info on the listing page
def torbay_listing_parser(soup):

    # Fields to be parsed
    nm = 0                              # *Total_Products (Should be Integer)
    mktName = "TorBay"                  # 0 *Marketplace_Name
    vendor = []                         # 1 *Vendor y
    rating_vendor = []                  # 2 Vendor_Rating
    success = []                        # 3 Vendor_Successful_Transactions
    name = []                           # 4 *Product_Name y
    CVE = []                            # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                             # 6 Product_MS_Classification (Microsoft Security)
    category = []                       # 7 Product_Category y
    describe = []                       # 8 Product_Description
    views = []                          # 9 Product_Number_Of_Views
    reviews = []                        # 10 Product_Number_Of_Reviews
    rating_item = []                    # 11 Product_Rating
    addDate = []                        # 12 Product_AddDate
    BTC = []                            # 13 Product_BTC_SellingPrice
    USD = []                            # 14 Product_USD_SellingPrice y
    EURO = []                           # 15 Product_EURO_SellingPrice
    sold = []                           # 16 Product_QuantitySold
    qLeft = []                          # 17 Product_QuantityLeft
    shipFrom = []                       # 18 Product_ShippedFrom
    shipTo = []                         # 19 Product_ShippedTo
    image = []                          # 20 Product_Image
    image_vendor = []                   # 21 Vendor_Image
    href = []                           # 22 Product_Links

    listing = soup.findAll('div', {"class": "product-card"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:

        # Finding Product Name
        product_name = a.find('p', {'class': 'name'}).text
        name.append(cleanString(product_name.strip()))

        # Finding Product Image
        image.append("-1")

        # Finding Prices
        prod = a.find('p', {'class': 'price'}).text  # price
        USD.append(cleanString(prod.strip()))

        # Finding the Vendor
        ven = a.find('div', {'class': 'pc-footer'}).find('div').find('a').text  # pc-footer
        vendor.append(cleanString(ven.strip()))
        # print(ven)

        # Finding Vendor Image
        image_vendor.append("-1")

        # Finding the Product Link
        h = a.find('p', {'class': 'name'}).find('a').get('href')
        href.append(h)

        # Populating the fields not available on the listing page with the default value "-1";
        # the category is hardcoded to "Hacking" for every listing on this page
        CVE.append("-1")
        MS.append("-1")
        rating_vendor.append("-1")
        success.append("-1")
        describe.append("-1")
        views.append("-1")
        reviews.append("-1")
        rating_item.append("-1")
        addDate.append("-1")
        BTC.append("-1")
        EURO.append("-1")
        sold.append("-1")
        qLeft.append("-1")
        shipFrom.append("-1")
        shipTo.append("-1")
        category.append("Hacking")

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image, image_vendor)
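

# A minimal offline usage sketch for the listing parser; 'torbay_listing.html' is a
# hypothetical path to a previously saved TorBay listing page.
def _example_listing_parse(html_path='torbay_listing.html'):
    with open(html_path, 'r', encoding='utf-8') as f:
        soup = BeautifulSoup(f.read(), 'html.parser')
    # The return value is whatever organizeProducts builds from the per-product lists
    return torbay_listing_parser(soup)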


# called by the crawler to get description links on a listing page
# @param: soup object of a listing page's HTML
# @return: list of description links from a listing page
def torbay_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []
    listing = soup.find('section', {"id": "content"}).findAll('div', {"class": "product-card"})

    for a in listing:
        bae = a.find('div', {"class": "pc-footer"}).find('a', {"class": "btn btn-primary"}, href=True)
        link = bae['href']
        href.append(link)

    return href
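

# A minimal sketch of how the crawler-facing pieces fit together, assuming the listing page
# was already fetched and saved to disk (the path below is hypothetical): the listing page
# yields description links, and each fetched description page would then be handed to
# torbay_description_parser.
def _example_crawl_flow(listing_path='torbay_listing.html'):
    with open(listing_path, 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')
    links = torbay_links_parser(listing_soup)
    print(str(len(links)) + " description links found")
    return links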