__author__ = 'DarkWeb'

import re

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# parses description pages, so takes html pages of description pages using soup object, and parses it for info it needs
# stores info it needs in different lists, these lists are returned after being organized
# @param: soup object looking at html page of description page
# return: 'row' that contains a variety of lists that each hold info on the description page
def ares_description_parser(soup):
    # Fields to be parsed

    vendor = "-1"            # 0 *Vendor_Name
    success = "-1"           # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"     # 2 Vendor_Rating
    name = "-1"              # 3 *Product_Name
    describe = "-1"          # 4 Product_Description
    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"          # 7 Product_Category
    views = "-1"             # 8 Product_Number_Of_Views
    reviews = "-1"           # 9 Product_Number_Of_Reviews
    rating_item = "-1"       # 10 Product_Rating
    addDate = "-1"           # 11 Product_AddedDate
    BTC = "-1"               # 12 Product_BTC_SellingPrice
    USD = "-1"               # 13 Product_USD_SellingPrice
    EURO = "-1"              # 14 Product_EURO_SellingPrice
    sold = "-1"              # 15 Product_QuantitySold
    left = "-1"              # 16 Product_QuantityLeft
    shipFrom = "-1"          # 17 Product_ShippedFrom
    shipTo = "-1"            # 18 Product_ShippedTo
    image = "-1"             # 19 Product_Image
    vendor_image = "-1"      # 20 Vendor_Image

    # Finding Product Name
    divmb = soup.find('div', {'class': "col-md-12 my-2"})

    name = divmb.find('span', {'class': "btn btn-sm btn-outline-dark w-100 active rounded-0"}).text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    box = soup.find('div', {'class': "col-md-7"}).find('span')
    box = box.findAll('span', {'class': "btn btn-mgray btn-sm w-100 active border-danger"})

    # Finding Vendor
    vendor = soup.find('a', {'class': "btn btn-sm btn-mgray my-1 w-100 text-white"}).get('href')
    vendor = vendor.split('otherParty=')[-1]
    vendor = cleanString(vendor).strip()

    # Finding Vendor Rating (count of full star icons, plus 0.5 if a half-star icon is present)
    temp = box[1]
    rating_vendor = len(temp.findAll('i', {"class": "fas fa-star"}))
    half_stars = len(temp.findAll('i', {'class': "fas fa-star-half-alt"}))
    if half_stars > 0:
        rating_vendor += 0.5

    # Finding Successful Transactions
    success = box[2].text
    success = cleanNumbers(success).strip()

    box2 = soup.find('div', {"class": "col-md-4 text-center"}).find('span', {"class": "text-left"}).findAll('span')

    # Finding USD
    USD = box2[0].text
    USD = USD.replace('\n', '')
    USD = USD.replace('$', '')
    USD = USD.strip()

    # Finding Vendor Image
    vendor_image = soup.find('img', {"class": 'img-fluid'}).get('src')
    vendor_image = vendor_image.split('base64,')[-1]

    # Finding the Product description
    temp = soup.find('div', {"class": 'row-md-12'}).find('div', {"class": 'col-md-4'})
    cardbody = temp.find('textarea', {"class": 'disabled form-control form-control-sm w-100 bg-mgray text-white rounded-0 border-danger'})
    describe = cleanString(cardbody.text).strip()

    # Finding Product Image
    image = soup.find('div', {"class": 'row-md-12'}).find('div', {"class": 'col-md-4 text-center'}).find('img')
    if image is not None:
        image = image.get('src')
        image = image.split('base64,')[-1]
    else:
        image = "-1"

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += idx
            CVE += " "
        CVE = CVE.replace(',', ' ')
        CVE = CVE.replace('\n', '')

    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += im
            MS += " "
        MS = MS.replace(',', ' ')
        MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item,
           addDate, BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
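
# --- Example (not part of the crawler pipeline) ------------------------------
# A minimal debugging sketch, under the assumption that the tuple returned by
# ares_description_parser keeps the exact field order documented above. The
# names ARES_DESCRIPTION_FIELDS and ares_description_to_dict are hypothetical
# helpers introduced here for illustration only.
ARES_DESCRIPTION_FIELDS = (
    'vendor', 'rating_vendor', 'success', 'name', 'describe', 'CVE', 'MS',
    'category', 'views', 'reviews', 'rating_item', 'addDate', 'BTC', 'USD',
    'EURO', 'sold', 'left', 'shipFrom', 'shipTo', 'image', 'vendor_image')


def ares_description_to_dict(row):
    # Zip the positional tuple into a dict keyed by field name for easy inspection
    return dict(zip(ARES_DESCRIPTION_FIELDS, row))
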
# parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs
# stores info it needs in different lists, these lists are returned after being organized
# @param: soup object looking at html page of listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def ares_listing_parser(soup):
    # Fields to be parsed
    nm = 0                    # *Total_Products (Should be Integer)
    mktName = "Ares"          # 0 *Marketplace_Name
    vendor = []               # 1 *Vendor y
    rating_vendor = []        # 2 Vendor_Rating
    success = []              # 3 Vendor_Successful_Transactions
    name = []                 # 4 *Product_Name y
    CVE = []                  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []                   # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []             # 7 Product_Category y
    describe = []             # 8 Product_Description
    views = []                # 9 Product_Number_Of_Views
    reviews = []              # 10 Product_Number_Of_Reviews
    rating_item = []          # 11 Product_Rating
    addDate = []              # 12 Product_AddDate
    BTC = []                  # 13 Product_BTC_SellingPrice
    USD = []                  # 14 Product_USD_SellingPrice y
    EURO = []                 # 15 Product_EURO_SellingPrice
    sold = []                 # 16 Product_QuantitySold
    qLeft = []                # 17 Product_QuantityLeft
    shipFrom = []             # 18 Product_ShippedFrom
    shipTo = []               # 19 Product_ShippedTo
    image = []                # 20 Product_Image
    image_vendor = []         # 21 Vendor_Image
    href = []                 # 22 Product_Links

    cat = soup.find('span', {"class": "btn btn-sm btn-outline-dark w-100 active"}).text
    cat = cleanString(cat).strip()

    listing = soup.find('div', {"class": 'card-body text-black text-left bg-dark'}).findAll('div', {"class": 'card mb-4 border-danger rounded-0'})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        category.append(cat)

        # Adding the url to the list of urls
        link = a.find('a', {'class': "badge badge-danger w-100 text-white"}).get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product name
        product = a.find('div', {"class": 'marquee-parent'}).find('div', {"class": "marquee-child"}).text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # Finding Product Image
        product_image = a.find('img')
        product_image = product_image.get('src')
        product_image = product_image.split('base64,')[-1]
        image.append(product_image)

        # Finding Prices
        price = a.findAll('a', {"class": "text-white"})[-1].text
        price = price.replace("$", "")
        price = price.strip()
        USD.append(price)

        # Finding Item Rating (count of full star icons, plus 0.5 if a half-star icon is present)
        temp = a.find('small', {"class": "text-white"})
        rating = len(temp.findAll('i', {"class": "fas fa-star"}))
        half_stars = len(temp.findAll('i', {'class': "fas fa-star-half-alt"}))
        if half_stars > 0:
            rating += 0.5
        rating_item.append(str(rating))

        # Finding the Vendor
        vendor_name = a.find('a', {"class": 'badge badge-dark w-100 text-white my-1'}).text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        image_vendor.append("-1")

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += idx
                cee += " "
            cee = cee.replace(',', ' ')
            cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += im
                me += " "
            me = me.replace(',', ' ')
            me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href,
                            image, image_vendor)
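
# --- Example (illustration only) ----------------------------------------------
# Both parsers above derive a rating by counting <i class="fas fa-star"> icons
# and adding 0.5 when a half-star icon appears. The HTML snippet below is
# invented for illustration, only to make that counting rule concrete: two full
# stars plus a half-star yields 2.5.
def _star_rating_example():
    sample = BeautifulSoup(
        '<small class="text-white">'
        '<i class="fas fa-star"></i><i class="fas fa-star"></i>'
        '<i class="fas fa-star-half-alt"></i>'
        '</small>', 'html.parser')
    temp = sample.find('small', {"class": "text-white"})
    rating = len(temp.findAll('i', {"class": "fas fa-star"}))
    if len(temp.findAll('i', {'class': "fas fa-star-half-alt"})) > 0:
        rating += 0.5
    return rating  # 2.5 for the sample above
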
# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def ares_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('div', {"class": "col-md-4 my-md-0 my-2 col-12"})

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

    return href
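
# --- Example usage (a sketch, not the crawler's entry point) -------------------
# The crawler normally builds the soup objects itself; this block only shows how
# the parsers above could be driven by hand against a locally saved page. The
# file name 'ares_listing_sample.html' is a hypothetical placeholder.
if __name__ == '__main__':
    with open('ares_listing_sample.html', 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')

    # Description links the crawler would queue up next
    links = ares_links_parser(listing_soup)
    print(len(links), 'description links found')

    # Per-product rows organized for the whole listing page
    products = ares_listing_parser(listing_soup)
    print(products)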