__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# parses description pages: takes the HTML of a description page as a soup object and extracts the info it needs
# stores that info in individual fields, which are organized and returned as a single row
# @param: soup object looking at html page of description page
# return: 'row' that contains a variety of fields that each hold info on the description page
def darkbay_description_parser(soup):

    # Fields to be parsed

    vendor = "-1"                       # 0 *Vendor_Name
    success = "-1"                      # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"                # 2 Vendor_Rating
    name = "-1"                         # 3 *Product_Name
    describe = "-1"                     # 4 Product_Description
    CVE = "-1"                          # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                           # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"                     # 7 Product_Category
    views = "-1"                        # 8 Product_Number_Of_Views
    reviews = "-1"                      # 9 Product_Number_Of_Reviews
    rating_item = "-1"                  # 10 Product_Rating
    addDate = "-1"                      # 11 Product_AddedDate
    BTC = "-1"                          # 12 Product_BTC_SellingPrice
    USD = "-1"                          # 13 Product_USD_SellingPrice
    EURO = "-1"                         # 14 Product_EURO_SellingPrice
    sold = "-1"                         # 15 Product_QuantitySold
    left = "-1"                         # 16 Product_QuantityLeft
    shipFrom = "-1"                     # 17 Product_ShippedFrom
    shipTo = "-1"                       # 18 Product_ShippedTo
    image = "-1"                        # 19 Product_Image
    vendor_image = "-1"                 # 20 Vendor_Image

    # vendor name
    vendor = soup.find('div', {'class': 'vendor'}).find('a').text

    # product name
    name = soup.find('h3').text

    # description
    describe = soup.find('div', {'class': 'description'}).text.replace('\n', ' ')

    # category
    category = soup.find('i', {'class': 'fas fa-folder'}).text

    # finding price: the btc span holds the ₿ amount, the usd span the $ amount
    money = soup.find('div', {'class': 'price'})
    BTC = money.find('span', {'class': 'btc'}).text.replace('₿', '')
    USD = money.find('span', {'class': 'usd'}).text.replace('$', '')

    # quantity left in stock
    if soup.find('div', {'class': 'instock many'}):
        left = soup.find('div', {'class': 'instock many'}).text
        if 'in stock' in left:
            left = left.replace('in stock', '').strip()
        if left == '':
            left = '-1'

    # ship to and ship from
    location = soup.find('dl', {'class': 'compact ships mb1'}).findAll('dd')
    shipFrom = location[0].text
    shipFrom = ''.join(c for c in shipFrom if c.isalnum())
    shipFrom = shipFrom.strip()
    shipFrom = cleanString(shipFrom)
    shipTo = location[1].text
    shipTo = ''.join(c for c in shipTo if c.isalnum())
    shipTo = shipTo.strip()
    shipTo = cleanString(shipTo)

    # Finding Product Image
    if soup.find('div', {"class": 'product-photos'}):
        image = soup.find('div', {"class": 'product-photos'}).find('img')
        image = image.get('src')
        image = image.split('base64,')[-1]
    else:
        image = "-1"

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item,
           addDate, BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row


# parses listing pages: takes the HTML of a listing page as a soup object and extracts the info it needs
# stores that info in different lists; these lists are organized and returned after parsing
# @param: soup object looking at html page of listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def darkbay_listing_parser(soup):

    # Fields to be parsed
    nm = 0                              # *Total_Products (Should be Integer)
    mktName = "DarkBay"                 # 0 *Marketplace_Name
    vendor = []                         # 1 *Vendor y
    rating_vendor = []                  # 2 Vendor_Rating
    success = []                        # 3 Vendor_Successful_Transactions
    name = []                           # 4 *Product_Name y
    CVE = []                            # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []                             # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []                       # 7 Product_Category y
    describe = []                       # 8 Product_Description
    views = []                          # 9 Product_Number_Of_Views
    reviews = []                        # 10 Product_Number_Of_Reviews
    rating_item = []                    # 11 Product_Rating
    addDate = []                        # 12 Product_AddDate
    BTC = []                            # 13 Product_BTC_SellingPrice
    USD = []                            # 14 Product_USD_SellingPrice y
    EURO = []                           # 15 Product_EURO_SellingPrice
    sold = []                           # 16 Product_QuantitySold
    qLeft = []                          # 17 Product_QuantityLeft
    shipFrom = []                       # 18 Product_ShippedFrom
    shipTo = []                         # 19 Product_ShippedTo
    image = []                          # 20 Product_Image
    image_vendor = []                   # 21 Vendor_Image
    href = []                           # 22 Product_Links

    # breadcrumb links; cat[1] is the current category
    cat = soup.find('div', {'class': 'path'}).findAll('a')
    listing = soup.find('div', {"class": 'items'}).findAll('form', {"action": '/cart'})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        category.append(cat[1].text)

        # Adding the url to the list of urls
        link = a.find('a').get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product name
        product = a.find('a').text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # Finding Product Image
        product_image = a.find('img')
        if product_image is not None:
            product_image = product_image.get('src')
            product_image = product_image.split('base64,')[-1]
            image.append(product_image)
        else:
            image.append('-1')

        # vendor name
        ven = a.find('div', {'class': 'vendor'}).find('a').text
        ven = cleanString(ven)
        vendor.append(ven)

        # find price in usd
        usd = a.find('div', {'class': 'price'}).text
        usd = usd.replace('$', '')
        USD.append(usd)

        # fields not shown on DarkBay listing pages default to '-1'
        rating_vendor.append('-1')
        success.append('-1')
        CVE.append('-1')
        MS.append('-1')
        describe.append('-1')
        views.append('-1')
        reviews.append('-1')
        rating_item.append('-1')
        addDate.append('-1')
        BTC.append('-1')
        EURO.append('-1')
        sold.append('-1')
        qLeft.append('-1')
        shipFrom.append('-1')
        shipTo.append('-1')
        image_vendor.append('-1')

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href,
                            image, image_vendor)


# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def darkbay_links_parser(soup):
    # Returning all links that should be visited by the Crawler

    href = []
    listing = soup.find('div', {"class": "items"}).findAll('form', {'class': 'item'})

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

    return href
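

# --------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): runs the parsers above against a
# locally saved copy of a DarkBay listing page. The file name
# 'sample_listing.html' is an assumed placeholder, not part of the crawler
# pipeline, which normally builds the soup objects and calls these functions
# itself.
if __name__ == '__main__':
    with open('sample_listing.html', 'r', encoding='utf-8') as f:
        page_soup = BeautifulSoup(f.read(), 'html.parser')

    # description-page links the crawler would visit next
    print(darkbay_links_parser(page_soup))

    # organized rows of product info scraped from the listing page
    print(darkbay_listing_parser(page_soup))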