- __author__ = 'DarkWeb'
-
- # Here, we are importing the auxiliary functions to clean or convert data
- from typing import List, Tuple
- from MarketPlaces.Utilities.utilities import *
-
- # Here, we are importing BeautifulSoup to search through the HTML tree
- from bs4 import BeautifulSoup, ResultSet, Tag
-
-
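- #parses a product description page and extracts the fields listed below
- #@param: beautifulsoup object that is using the correct html page (description page)
- #return: 'row' tuple holding every field scraped from the description page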
- def thiefWorld_description_parser(soup: BeautifulSoup) -> Tuple:
-
- # Fields to be parsed
- vendor = "-1" # 0 *Vendor_Name
- success = "-1" # 1 Vendor_Successful_Transactions
- rating_vendor = "-1" # 2 Vendor_Rating
- name = "-1" # 3 *Product_Name
- describe = "-1" # 4 Product_Description
- CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
- MS = "-1" # 6 Product_MS_Classification (Microsoft Security)
- category = "-1" # 7 Product_Category
- views = "-1" # 8 Product_Number_Of_Views
- reviews = "-1" # 9 Product_Number_Of_Reviews
- rating_item = "-1" # 10 Product_Rating
- addDate = "-1" # 11 Product_AddedDate
- BTC = "-1" # 12 Product_BTC_SellingPrice
- USD = "-1" # 13 Product_USD_SellingPrice
- EURO = "-1" # 14 Product_EURO_SellingPrice
- sold = "-1" # 15 Product_QuantitySold
- left = "-1" # 16 Product_QuantityLeft
- shipFrom = "-1" # 17 Product_ShippedFrom
- shipTo = "-1" # 18 Product_ShippedTo
- image = "-1" # 19 Product_Image
- vendor_image = "-1" # 20 Vendor_Image
-
- name = soup.find("h1", {'class': 'title'}).text
- name = cleanString(name.strip())
-
- describe = soup.find('div', {'id': 'descriptionContent'}).text
- describe = cleanString(describe.strip())
-
- # Finding Product Image
- image = soup.find('div', {'class': 'product_img_big'}).find('img')
- image = image.get('src')
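- # the 'src' appears to be a base64 data URI, so only the payload after 'base64,' is kept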
- image = image.split('base64,')[-1]
-
- # Finding the review count (number of comments on the product page)
- commentListTag: Tag = soup.find('ul', {'class': 'comment_list scrollbar'})
- commentList = commentListTag.find_all('li')
- reviews = str(len(commentList))
-
- citySelection: str = soup.find('ul', {'class': 'meta text-muted i_location'}).text
- shipFrom = cleanString(citySelection.strip())
-
- vendor = soup.find('h1', {'class': 'title over'}).text
- vendor = cleanString(vendor.strip())
-
- usdTag: Tag = soup.find('div', {'class': 'product_price__big'}).find('span')
- # keep only the part before the '/'; usdText format: "<value> USD " (e.g., "70 000 USD ")
- usdText = usdTag.text.split('/')[0]
- USD = cleanString(usdText.replace("USD", "").strip())
-
- ratingDiv = soup.find('div', {'class': 'rating_star'})
- rating_vendor = ratingDiv.get('title').split(' ')[1]
-
- rating_item = soup.find('div', {'class': 'product_rate'}).text
- rating_item = rating_item.replace("rating", "")
- rating_item = cleanString(rating_item.strip())
-
- # Populating the final variable (this should be a tuple with all the fields scraped)
- row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
- BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)
-
- # Sending the results
- return row
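-
- # For reference (indices as listed above): row[0] is the vendor name, row[3] the product
- # name and row[13] the USD selling price.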
-
-
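- #parses a listing page and extracts the fields below for every product on it
- #@param: beautifulsoup object that is using the correct html page (listing page)
- #return: products organized via organizeProducts with all fields scraped from the listing page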
- def thiefWorld_listing_parser(soup: BeautifulSoup):
-
- # Fields to be parsed
- nm = 0 # Total_Products (Should be Integer)
- mktName = "ThiefWorld" # 0 Marketplace_Name
- vendor = [] # 1 *Vendor y
- rating_vendor = [] # 2 Vendor_Rating
- success = [] # 3 Vendor_Successful_Transactions
- name = [] # 4 *Product_Name y
- CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
- MS = [] # 6 Product_MS_Classification (Microsoft Security)
- category = [] # 7 Product_Category y
- describe = [] # 8 Product_Description
- views = [] # 9 Product_Number_Of_Views
- reviews = [] # 10 Product_Number_Of_Reviews
- rating_item = [] # 11 Product_Rating
- addDate = [] # 12 Product_AddDate
- BTC = [] # 13 Product_BTC_SellingPrice
- USD = [] # 14 Product_USD_SellingPrice y
- EURO = [] # 15 Product_EURO_SellingPrice
- sold = [] # 16 Product_QuantitySold
- qLeft = [] # 17 Product_QuantityLeft
- shipFrom = [] # 18 Product_ShippedFrom
- shipTo = [] # 19 Product_ShippedTo
- image = [] # 20 Product_Image
- image_vendor = [] # 21 Vendor_Image
- href = [] # 22 Product_Links
-
- productList: ResultSet[Tag] = soup.find_all('div', {'class': 'catalog_item'})
-
- nm = len(productList)
-
- for product in productList:
-
- productTitle: Tag = product.find('div', {'class': 'title'}).find('a')
-
- productName = cleanString(productTitle.text.strip())
- name.append(productName)
-
- # Finding Product Image
- product_image = product.find('noscript').find('img')
- product_image = product_image.get('src')
- product_image = product_image.split('base64,')[-1]
- image.append(product_image)
-
- productHref = productTitle.get('href')
- href.append(productHref)
-
- CVE.append('-1')
- MS.append('-1')
-
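- # NOTE: the category is read from a page-level element ('calsys-cat'), so every product
- # scraped from this listing page ends up with the same category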
- cat = soup.find('calsys-cat').text
- category.append(cat.strip())
-
- productDescription = product.find('div', {'class': 'text'}).text
- productDescription = cleanString(productDescription.strip())
- describe.append(productDescription)
-
- views.append('-1')
- reviews.append('-1')
- addDate.append('-1')
- BTC.append('-1')
-
- priceText = product.find('span', {'class': 'price'}).find('span').text
- priceText = priceText.split('USD')[0]
- priceText = cleanString(priceText.strip())
- USD.append(priceText)
-
- EURO.append('-1')
- sold.append('-1')
- qLeft.append('-1')
- shipFrom.append('-1')
- shipTo.append('-1')
-
- productVendor = product.find('div', {'class': 'market over'}).find('a').text
- productVendor = cleanString(productVendor.strip())
- vendor.append(productVendor)
-
- image_vendor.append('-1')
-
- rating_vendor.append('-1')
- #rating_item.append('-1')
-
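- # the width of the yellow-star overlay (e.g. "width: 80%") presumably encodes the star
- # fill, so the item rating is stored as a percentage string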
- rating = product.find('div', {'class': 'rating_star_yellow'}).attrs.get('style')
- rating = rating.replace("width: ", "")
- rating_item.append(cleanString(rating))
-
- success.append('-1')
-
-
- # Populate the final variable (this should be a list with all fields scraped)
- return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
- reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image, image_vendor)
-
-
- #called by the crawler to get description links on a listing page
- #@param: beautifulsoup object that is using the correct html page (listing page)
- #return: list of description links from a listing page
- def thiefworld_links_parser(soup):
-
- # Returning all links that should be visited by the Crawler
-
- href = []
- listing = soup.find('div', {"class": "row tile__list tileitems_filter pad15 tileproduct__list"}).findAll('div', {"class": "desc"})
-
- for a in listing:
- bae = a.find('div', {"class": "title"}).find('a', href=True)
- link = bae['href']
- href.append(link)
-
- return href
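-
-
- # A minimal, hypothetical usage sketch (not part of the original module): it assumes a
- # listing page and a description page have already been saved locally under the placeholder
- # paths below, and that BeautifulSoup's built-in 'html.parser' is acceptable for these pages.
- if __name__ == '__main__':
-     with open('pages/thiefworld_listing.html', 'r', encoding='utf-8') as f:  # placeholder path
-         listingSoup = BeautifulSoup(f.read(), 'html.parser')
-
-     # links to every product description page found on the listing page
-     links = thiefworld_links_parser(listingSoup)
-
-     # listing rows organized by the shared utilities (format defined by organizeProducts)
-     listingRows = thiefWorld_listing_parser(listingSoup)
-
-     with open('pages/thiefworld_description.html', 'r', encoding='utf-8') as f:  # placeholder path
-         descriptionSoup = BeautifulSoup(f.read(), 'html.parser')
-
-     # tuple of fields scraped from a single description page
-     row = thiefWorld_description_parser(descriptionSoup)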