__author__ = 'DarkWeb'

# re is used below to match CVE/MS identifiers in page text
import re

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# parses description pages: takes the HTML of a description page as a soup object and extracts the info it needs
# the extracted fields are organized into a row and returned
# @param: soup object looking at html page of description page
# return: 'row', a tuple holding every field scraped from the description page
def metaversemarket_description_parser(soup):
    # Fields to be parsed
    vendor = "-1"           # 0 *Vendor_Name
    success = "-1"          # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"    # 2 Vendor_Rating
    name = "-1"             # 3 *Product_Name
    describe = "-1"         # 4 Product_Description
    CVE = "-1"              # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"               # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"         # 7 Product_Category
    views = "-1"            # 8 Product_Number_Of_Views
    reviews = "-1"          # 9 Product_Number_Of_Reviews
    rating_item = "-1"      # 10 Product_Rating
    addDate = "-1"          # 11 Product_AddedDate
    BTC = "-1"              # 12 Product_BTC_SellingPrice
    USD = "-1"              # 13 Product_USD_SellingPrice
    EURO = "-1"             # 14 Product_EURO_SellingPrice
    sold = "-1"             # 15 Product_QuantitySold
    left = "-1"             # 16 Product_QuantityLeft
    shipFrom = "-1"         # 17 Product_ShippedFrom
    shipTo = "-1"           # 18 Product_ShippedTo
    image = "-1"            # 19 Product_Image
    vendor_image = "-1"     # 20 Vendor_Image

    # Finding Product Name
    name = soup.find('div', {'class': "panel-heading"}).text
    name = cleanString(name.strip())

    temp = soup.findAll('div', {'class': "col-xs-12 col-sm-6 mt-5"})

    # Finding Product Image (keep only the base64 payload of the data URI)
    image = temp[0].find('img')
    image = image.get('src')
    image = image.split('base64,')[-1]

    # Finding Vendor
    temp = temp[1].findAll('span')
    vendor = temp[1].find('b').text
    vendor = cleanString(vendor.strip())

    # Finding Vendor Rating (share of positive feedback out of all feedback)
    pos = soup.find('span', {'class': "badge bg-success fs-12px"}).text
    pos = int(cleanNumbers(pos).strip())
    neg = soup.find('span', {'class': "badge bg-danger fs-12px"}).text
    neg = int(cleanNumbers(neg).strip())
    total = pos + neg
    if total > 0:
        rating_vendor = str(pos / total)

    # Finding Prices
    USD = soup.find('h3', {'class': "mb-2"}).text
    USD = cleanNumbers(USD).strip()

    # Finding the Product Category
    temp = soup.select('div[class="mt-2"]')[1].text
    temp = temp.replace("Category:", "")
    category = temp.strip()

    # Finding Number of Views
    views = soup.find('button', {"class": "btn btn-secondary text-center w-33 fw-bold"}).text
    views = views.strip()

    # Finding the Product Quantity Available (the page shows "sold / left")
    temp = soup.find('button', {"class": "btn btn-success text-center w-33 fw-bold"}).text
    temp = temp.split("/")
    left = temp[1].strip()

    # Finding Number Sold
    sold = temp[0].strip()

    # Finding Shipment Information (Origin)
    temp = soup.find('div', {'class': "alert alert-info"}).text
    temp = temp.split("to")
    shipFrom = temp[0].replace("Shipping from ", "").strip()

    # Finding Shipment Information (Destination)
    shipTo = temp[1].split("for")
    shipTo = shipTo[0].strip()

    # Finding the Product Description
    describe = soup.find('p', {'class': "card-text"}).text
    describe = cleanString(describe.strip())

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item,
           addDate, BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
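
# Example (sketch, not part of the parser): how the description parser might be
# exercised against a page the crawler has already saved. The file path is
# hypothetical; the parser only needs a BeautifulSoup object built from the HTML,
# and the indices into 'row' follow the field comments above.
#
#     with open('saved_pages/description.html', 'r', encoding='utf-8') as f:
#         soup = BeautifulSoup(f.read(), 'html.parser')
#     row = metaversemarket_description_parser(soup)
#     vendor, product_name, usd_price = row[0], row[3], row[13]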


# parses listing pages: takes the HTML of a listing page as a soup object and extracts the info it needs
# the extracted info is stored in per-field lists, which are organized and returned
# @param: soup object looking at html page of listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def metaversemarket_listing_parser(soup):
    # Fields to be parsed
    nm = 0                          # *Total_Products (Should be Integer)
    mktName = "MetaVerseMarket"     # 0 *Marketplace_Name
    vendor = []                     # 1 *Vendor y
    rating_vendor = []              # 2 Vendor_Rating
    success = []                    # 3 Vendor_Successful_Transactions
    name = []                       # 4 *Product_Name y
    CVE = []                        # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []                         # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []                   # 7 Product_Category y
    describe = []                   # 8 Product_Description
    views = []                      # 9 Product_Number_Of_Views
    reviews = []                    # 10 Product_Number_Of_Reviews
    rating_item = []                # 11 Product_Rating
    addDate = []                    # 12 Product_AddDate
    BTC = []                        # 13 Product_BTC_SellingPrice
    USD = []                        # 14 Product_USD_SellingPrice y
    EURO = []                       # 15 Product_EURO_SellingPrice
    sold = []                       # 16 Product_QuantitySold
    qLeft = []                      # 17 Product_QuantityLeft
    shipFrom = []                   # 18 Product_ShippedFrom
    shipTo = []                     # 19 Product_ShippedTo
    image = []                      # 20 Product_Image
    image_vendor = []               # 21 Vendor_Image
    href = []                       # 22 Product_Links

    listing = soup.findAll('div', {"class": "col-12 col-sm-4 col-xl-3 product_item_col p-1"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product
        product = bae[1].find('span', {"class": "text-primary"}).text
        name.append(cleanString(product.strip()))

        # Finding Prices
        price = a.find('strong').text
        USD.append(cleanNumbers(price).strip())

        # Finding the Vendor
        temp = a.find('div', {'class': "mt-1 fs-12px"})
        temp = temp.findAll('span')
        vendor_name = temp[1].find('b').text
        vendor.append(cleanString(vendor_name.strip()))

        # Finding the Category
        cat = a.select_one('div[class="fs-12px"]')
        cat = cat.findAll('span')[1].text
        cat = cat.strip()
        category.append(cat)

        ul = a.find('ul', {"class": "product-actions"})

        # Finding Number Sold and Quantity Left (shown as "sold / left"; 'k' suffixes are expanded to thousands)
        temp = ul.find('span', {'class': "badge bg-success"}).text
        temp = temp.split("/")
        num = temp[0]
        num = num.replace('k', '000')
        sold.append(cleanNumbers(num).strip())

        quant = temp[1]
        quant = quant.replace('k', '000')
        qLeft.append(cleanNumbers(quant).strip())

        # Finding Description
        # description = a.find('p', {'class': "alert alert-light text-ssbold p-1"}).text
        # description = description.replace("\n", " ")
        # description = description.strip()
        # describe.append(cleanString(description))

        # Finding Number of Views (drop the '.' and expand the 'K' suffix so the count is plain digits)
        view = ul.find('span', {'class': "badge bg-primary"}).text
        view = view.replace('.', '')
        view = view.replace('K', '000')
        views.append(view.strip())

        # Finding where the product ships from
        ships = a.find('div', {'class': "alert alert-info item_alert fs-12px p-1"})
        ships = ships.findAll('b')
        sFrom = ships[0].text.strip()
        shipFrom.append(sFrom)

        # Finding where the product ships to
        sTo = ships[1].text.strip()
        shipTo.append(sTo)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe,
                            views, reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom,
                            shipTo, href, image, image_vendor)
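
# Example (sketch, not part of the parser): invoking the listing parser on a saved
# listing page. The path is hypothetical; the return value is whatever
# organizeProducts() assembles from the per-field lists, so its exact shape is
# defined in MarketPlaces.Utilities.utilities.
#
#     with open('saved_pages/listing.html', 'r', encoding='utf-8') as f:
#         soup = BeautifulSoup(f.read(), 'html.parser')
#     products = metaversemarket_listing_parser(soup)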


# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def metaversemarket_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('div', {"class": "col-12 col-sm-4 col-xl-3 product_item_col p-1"})

    for a in listing:
        bae = a.find('a', href=True)
        link = bae['href']
        href.append(link)

    return href
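

# Minimal smoke test (sketch): wires the parsers together the way the crawler would,
# but against pages already saved to disk. The file names are hypothetical stand-ins
# for the crawler's downloaded HTML; nothing here touches the network.
if __name__ == '__main__':
    with open('listing.html', 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')

    # description links the crawler would visit next
    links = metaversemarket_links_parser(listing_soup)
    print(len(links), "description links found")

    # parse one saved description page, as the crawler would after visiting a link
    with open('description.html', 'r', encoding='utf-8') as f:
        description_soup = BeautifulSoup(f.read(), 'html.parser')
    row = metaversemarket_description_parser(description_soup)
    print("vendor:", row[0], "| product:", row[3], "| USD:", row[13])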