- __author__ = 'DarkWeb'
-
- # re is needed for the CVE / MS regular-expression searches below
- import re
-
- # Auxiliary functions to clean or convert the scraped data
- from MarketPlaces.Utilities.utilities import *
-
- # BeautifulSoup is used to search through the HTML tree
- from bs4 import BeautifulSoup
-
-
- # This is the method to parse the Description Pages (one page per Product in the Listing Pages)
- def silkroad4_description_parser(soup):
-
- # Fields to be parsed
-
- vendor = "-1" # 0 *Vendor_Name
- success = "-1" # 1 Vendor_Successful_Transactions
- rating_vendor = "-1" # 2 Vendor_Rating
- name = "-1" # 3 *Product_Name
- describe = "-1" # 4 Product_Description
- CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
- MS = "-1" # 6 Product_MS_Classification (Microsoft Security)
- category = "-1" # 7 Product_Category
- views = "-1" # 8 Product_Number_Of_Views
- reviews = "-1" # 9 Product_Number_Of_Reviews
- rating_item = "-1" # 10 Product_Rating
- addDate = "-1" # 11 Product_AddedDate
- BTC = "-1" # 12 Product_BTC_SellingPrice
- USD = "-1" # 13 Product_USD_SellingPrice
- EURO = "-1" # 14 Product_EURO_SellingPrice
- sold = "-1" # 15 Product_QuantitySold
- left = "-1" # 16 Product_QuantityLeft
- shipFrom = "-1" # 17 Product_ShippedFrom
- shipTo = "-1" # 18 Product_ShippedTo
- LTC = "-1" # 19 Product_LTC_SellingPrice
- XMR = "-1" # 20 Product_XMR_SellingPrice
- image = "-1" # 19 Product_Image
- vendor_image = "-1" # 20 Vendor_Image
-
- bae = soup.find('div', {'id': 'cats'})
-
- # Finding Vendor
- vendor = bae.find('font', {'color':'blue'}).text
- vendor = vendor.strip()
-
- # Finding Product Name
- name = bae.find('b', {'style': 'color:#333;'}).text
- name = name.strip()
-
- # Finding Image
- image = bae.find('img')
- image = image.get('src')
- image = image.split('base64,')[-1]
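- # (Illustrative note, not part of the original parser) the value kept in image is the
- # raw base64 payload of the inline <img> tag; if the actual bytes were ever needed,
- # they could be recovered with something like:
- #     import base64
- #     image_bytes = base64.b64decode(image)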
-
- # Finding Price
- temp = bae.find('span').next_sibling
- price_list = temp.split("/")
-
- USD = price_list[0].replace("$", "").strip()
- BTC = price_list[1].replace("BTC","").strip()
-
- # Not stored in the database/PGAdmin yet!
- LTC = price_list[2].replace("LTC", "").strip()
- XMR = price_list[3].replace("XMR", "").strip()
- #print(USD, "", BTC,"", LTC,"", XMR)
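- # (Assumption, for illustration only) the price text next to the span is expected to
- # look roughly like "$100.00/0.0123 BTC/1.5 LTC/0.8 XMR"; the split('/') above relies
- # on that USD/BTC/LTC/XMR ordering.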
-
- # Finding Ships From
- a = bae.find_all('a',href=True, limit = 2)
- shipFrom = a[0].text.strip()
-
- # Finding Category
- category = a[1].text.strip()
-
- # Finding the Product description
- describe = soup.find('div', {'style': 'color:#555;font-weight:normal;font-size:12px'}).text
- describe = cleanString(describe.strip())
-
- # Finding Rating
- rate = soup.find('div', {'style': 'padding:0px; margin-bottom:10px; font-size:12px;'})
- rate = rate.find('p')
- if rate is not None:
-     rate = rate.text.strip()
-
-     # Some descriptions show the rating as 'No rating yet'; that could be mapped
-     # to -1 for consistency in the database:
-     # if rate == 'No rating yet':
-     #     rating_item = -1
-
-     # Only extract the rating part (the text before 'Note'), guarding against
-     # pages where 'Note' does not appear
-     if 'Note' in rate:
-         rating_item = rate[:rate.index('Note')]
-     else:
-         rating_item = rate
-
-
-
- # Finding Number of Reviews
- table = soup.find('div', {'class': 'table-responsive'})
- if table is not None:
- num_rev = table.findAll('tr')
- reviews = len(num_rev) - 1
-
- # Searching for CVE and MS categories
- cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
- if cve:
- CVE = " "
- for idx in cve:
- CVE += (idx)
- CVE += " "
- CVE = CVE.replace(',', ' ')
- CVE = CVE.replace('\n', '')
- ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
- if ms:
- MS = " "
- for im in ms:
- MS += (im)
- MS += " "
- MS = MS.replace(',', ' ')
- MS = MS.replace('\n', '')
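- # (Illustrative note, not part of the original code) CVE and MS end up holding every
- # text node that contains an identifier such as CVE-2017-0144 or MS17-010, joined by
- # spaces; when nothing matches they keep their default value of "-1".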
-
- # Populating the final variable (a tuple with all the fields scraped)
- row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
- BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)
-
-
- # Sending the results
- return row
-
-
- # This is the method to parse the Listing Pages
- def silkroad4_listing_parser(soup):
-
- # Fields to be parsed
- nm = 0 # *Total_Products (Should be Integer)
- mktName = "SilkRoad4" # 0 *Marketplace_Name
- vendor = [] # 1 *Vendor y
- rating_vendor = [] # 2 Vendor_Rating
- success = [] # 3 Vendor_Successful_Transactions
- name = [] # 4 *Product_Name y
- CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
- MS = [] # 6 Product_MS_Classification (Microsoft Security)
- category = [] # 7 Product_Category y
- describe = [] # 8 Product_Description
- views = [] # 9 Product_Number_Of_Views
- reviews = [] # 10 Product_Number_Of_Reviews
- rating_item = [] # 11 Product_Rating
- addDate = [] # 12 Product_AddDate
- BTC = [] # 13 Product_BTC_SellingPrice
- USD = [] # 14 Product_USD_SellingPrice y
- EURO = [] # 15 Product_EURO_SellingPrice
- sold = [] # 16 Product_QuantitySold
- qLeft = [] # 17 Product_QuantityLeft
- shipFrom = [] # 18 Product_ShippedFrom
- shipTo = [] # 19 Product_ShippedTo
- href = [] # 20 Product_Links
- LTC = [] # 21 Product_LTC_SellingPrice
- XMR = [] # 22 Product_XMR_SellingPrice
- image = [] # 23 Product_Image
- image_vendor = [] # 24 Vendor_Image
-
-
- listing = soup.findAll('div', {'style': "padding:10px; width:100%; margin-bottom:5px; margin-top:0px"})
-
- # Populating the Number of Products
- nm = len(listing)
-
- # Finding the category of the listing page (taken from the second-to-last link of the first listing entry)
- cat = listing[0].find_all('a', href=True)
- cat = cat[-2].text
- cat = cat.replace(",", "")
- cat = cat.strip()
-
- for a in listing:
- bae = a.findAll('a', href=True)
-
- # Adding the category
- category.append(cat)
-
-
- # Adding the url to the list of urls
- link = bae[0].get('href')
- link = cleanLink(link)
- href.append(link)
-
- # Finding Price
- temp = a.find('b', {'style': 'color:#333'})
- temp = temp.text
- price = temp.split("/")
- USD.append(price[0].replace('$', '').strip())
- BTC.append(price[1].replace('BTC', '').strip())
-
- # LTC and XMR are not stored in PGAdmin as of now
- LTC.append(price[2].replace('LTC', '').strip())
- XMR.append(price[3].replace('XMR', '').strip())
- #print(USD, " ", BTC, ' ', LTC, '', XMR)
-
- # Finding the Vendor
- ven = a.findAll('b')
- v = ven[2].find('font').text
- v = v.strip()
- vendor.append(v)
-
- # Finding the Product
- product = bae[0].text
- product = product.replace(",", "")
- product = product.replace(",", "")
- product = product.replace("...", "")
- product = product.strip()
- name.append(product)
-
-
- # Finding ShipFrom
- shipf = bae[len(bae)-1].text.strip()
- shipFrom.append(shipf)
-
- # Finding image
- product_image = a.find('img')
- product_image = product_image.get('src')
- product_image = product_image.split('base64,')[-1]
- image.append(product_image)
-
- # Searching for CVE and MS categories
- cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
- if not cve:
- cveValue="-1"
- else:
- cee = " "
- for idx in cve:
- cee += (idx)
- cee += " "
- cee = cee.replace(',', ' ')
- cee = cee.replace('\n', '')
- cveValue=cee
- CVE.append(cveValue)
-
- ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
- if not ms:
- MSValue="-1"
- else:
- me = " "
- for im in ms:
- me += (im)
- me += " "
- me = me.replace(',', ' ')
- me = me.replace('\n', '')
- MSValue=me
- MS.append(MSValue)
-
- # Populate the final variable (this should be a list with all fields scraped)
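- # organizeProducts() comes from MarketPlaces.Utilities.utilities (imported with * above);
- # it presumably packs these per-product lists into one row per product for the database.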
- return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
- reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
- image_vendor)
-
-
- def silkroad4_links_parser(soup):
-
- # Returning all links that should be visited by the Crawler
- href = []
-
- # Find all divs with id "vp"
- divs = soup.findAll('div', {"id": "vp"})
-
- listing = []
-
- # For each div with id "vp", find the first anchor tag that carries an href
- for div in divs:
- a_s = div.find('a', href=True)
-
- # If the div contains no such anchor, skip it; otherwise store the anchor in listing
- if a_s is not None:
- listing.append(a_s)
-
- # Loop through the collected anchors and extract their hrefs
- for a in listing:
- link = a['href']
-
- # Links containing '?listing' point to product pages, so only those are appended to href
- if "?listing" in link:
- href.append(link)
-
- return href
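-
-
- # A minimal usage sketch (not part of the original module): the parsers above expect an
- # already-built BeautifulSoup tree of a saved SilkRoad4 page. The file path below is
- # hypothetical and only serves as an illustration.
- if __name__ == '__main__':
-     with open('silkroad4_listing_page.html', 'r', encoding='utf-8') as f:
-         listing_soup = BeautifulSoup(f.read(), 'html.parser')
-
-     # Links to the individual product pages advertised on the listing page
-     product_links = silkroad4_links_parser(listing_soup)
-
-     # One row of scraped fields per product on the listing page
-     listing_rows = silkroad4_listing_parser(listing_soup)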