- __author__ = 'DarkWeb'
-
- # Auxiliary functions used to clean and convert the scraped data
- from MarketPlaces.Utilities.utilities import *
-
- # BeautifulSoup is used to search through the HTML tree
- from bs4 import BeautifulSoup
-
- # Regular expressions are used for the CVE/MS pattern matching below
- import re
-
-
- # Parses a description page: takes the soup object of a description page and pulls out the fields it needs.
- # The extracted values are kept in individual variables and returned together once organized
- # (an example call is sketched after this function).
- # @param: soup object looking at the html page of a description page
- # @return: 'row', a tuple containing every field scraped from the description page
- def darkmatter_description_parser(soup):
-
- # Fields to be parsed
-
- vendor = "-1" # 0 *Vendor_Name
- success = "-1" # 1 Vendor_Successful_Transactions
- rating_vendor = "-1" # 2 Vendor_Rating
- name = "-1" # 3 *Product_Name
- describe = "-1" # 4 Product_Description
- CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
- MS = "-1" # 6 Product_MS_Classification (Microsoft Security)
- category = "-1" # 7 Product_Category
- views = "-1" # 8 Product_Number_Of_Views
- reviews = "-1" # 9 Product_Number_Of_Reviews
- rating_item = "-1" # 10 Product_Rating
- addDate = "-1" # 11 Product_AddedDate
- BTC = "-1" # 12 Product_BTC_SellingPrice
- USD = "-1" # 13 Product_USD_SellingPrice
- EURO = "-1" # 14 Product_EURO_SellingPrice
- sold = "-1" # 15 Product_QuantitySold
- left = "-1" # 16 Product_QuantityLeft
- shipFrom = "-1" # 17 Product_ShippedFrom
- shipTo = "-1" # 18 Product_ShippedTo
-
- # product name
- try:
- name = soup.find('head').find('title').text
- name = cleanString(name.strip())
- except:
- print("name")
-
- # product description
- try:
- temp = soup.find('pre', {'class': 'description'}).text
- temp = temp.replace('\n', ' ')
- describe = cleanString(temp.strip())
- except:
- print("description")
-
- # Populating the final variable (a tuple containing every field scraped from the page)
- row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
- BTC, USD, EURO, sold, left, shipFrom, shipTo)
-
- # Sending the results
- return row
-
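- # Example (a minimal sketch, not part of the original module): how the returned 'row' can be
- # indexed. The tuple order matches the field list at the top of the parser, e.g. vendor is 0,
- # name is 3 and the USD price is 13. The soup variable is assumed to come from the crawler.
- #
- # row = darkmatter_description_parser(soup)
- # vendor, name, usd = row[0], row[3], row[13]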
-
- # Parses a listing page: takes the soup object of a listing page and pulls out the fields it needs.
- # The values for every product are appended to per-field lists, which are organized into rows and
- # returned (an example call is sketched after this function).
- # @param: soup object looking at the html page of a listing page
- # @return: 'row', the organized per-product rows holding the info scraped from the listing page
- def darkmatter_listing_parser(soup):
-
- # Fields to be parsed
- nm = 0 # Total_Products (Should be Integer)
- mktName = "DarkMatter" # 0 Marketplace_Name
- name = [] # 1 Product_Name
- CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
- MS = [] # 3 Product_MS_Classification (Microsoft Security)
- category = [] # 4 Product_Category
- describe = [] # 5 Product_Description
- escrow = [] # 6 Vendor_Warranty
- views = [] # 7 Product_Number_Of_Views
- reviews = [] # 8 Product_Number_Of_Reviews
- addDate = [] # 9 Product_AddDate
- lastSeen = [] # 10 Product_LastViewDate
- BTC = [] # 11 Product_BTC_SellingPrice
- USD = [] # 12 Product_USD_SellingPrice
- EURO = [] # 13 Product_EURO_SellingPrice
- sold = [] # 14 Product_QuantitySold
- qLeft = [] # 15 Product_QuantityLeft
- shipFrom = [] # 16 Product_ShippedFrom
- shipTo = [] # 17 Product_ShippedTo
- vendor = [] # 18 Vendor
- rating = [] # 19 Vendor_Rating
- success = [] # 20 Vendor_Successful_Transactions
- href = [] # 21 Product_Links (Urls)
-
- listing = soup.findAll('div', {"class": "card"})
-
- # Populating the Number of Products
- nm = len(listing)
-
- for a in listing:
- bae = a.findAll('a', href=True)
-
- # Adding the url to the list of urls
- link = bae[0].get('href')
- link = cleanLink(link)
- href.append(link)
-
- # Finding the Product
- product = bae[1].find('p').text
- product = product.replace('\n', ' ')
- product = product.replace(",", "")
- product = product.replace("...", "")
- product = product.strip()
- name.append(product)
-
- bae = a.find('div', {'class': "media-content"}).find('div').find_all('div')
-
- if len(bae) >= 5:
- # Finding Prices
- price = bae[0].text
- ud = price.replace(" USD", " ")
- # u = ud.replace("$","")
- u = ud.replace(",", "")
- u = u.strip()
- USD.append(u)
- # bc = (prc[1]).strip(' BTC')
- # BTC.append(bc)
-
- # Finding the Vendor
- vendor_name = bae[1].find('a').text
- vendor_name = vendor_name.replace(",", "")
- vendor_name = vendor_name.strip()
- vendor.append(vendor_name)
-
- # Finding the Category
- cat = bae[2].find('small').text
- cat = cat.replace("Category: ", "")
- cat = cat.replace(",", "")
- cat = cat.strip()
- category.append(cat)
-
- # Finding Number Sold and Quantity Left
- num = bae[3].text
- num = num.replace("Sold: ", "")
- num = num.strip()
- sold.append(num)
-
- quant = bae[4].find('small').text
- quant = quant.replace("In stock: ", "")
- quant = quant.strip()
- qLeft.append(quant)
-
- # Finding Successful Transactions
- freq = bae[1].text
- freq = freq.replace(vendor_name, "")
- freq = re.sub(r'Vendor Level \d+', "", freq)
- freq = freq.replace("(", "")
- freq = freq.replace(")", "")
- freq = freq.strip()
- success.append(freq)
- else:
- # keep every per-product list the same length when a card is missing these fields
- USD.append("-1")
- vendor.append("-1")
- category.append("-1")
- sold.append("-1")
- qLeft.append("-1")
- success.append("-1")
-
- # Searching for CVE and MS categories
- cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
- if not cve:
- cveValue = "-1"
- else:
- cee = " "
- for idx in cve:
- cee += idx
- cee += " "
- cee = cee.replace(',', ' ')
- cee = cee.replace('\n', '')
- cveValue = cee
- CVE.append(cveValue)
-
- ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
- if not ms:
- MSValue = "-1"
- else:
- me = " "
- for im in ms:
- me += im
- me += " "
- me = me.replace(',', ' ')
- me = me.replace('\n', '')
- MSValue = me
- MS.append(MSValue)
-
- # Organizing and returning the final rows (one per product, containing every field scraped)
- return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
- BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
-
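- # Example (a minimal sketch): feeding one saved listing page to the parser. The file name is a
- # hypothetical placeholder, and organizeProducts (from the Utilities module) is assumed to turn
- # the per-field lists into one row per product 'card'.
- #
- # with open("darkmatter_listing.html", "r", encoding="utf-8") as f:
- # listing_soup = BeautifulSoup(f.read(), "html.parser")
- # rows = darkmatter_listing_parser(listing_soup)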
-
- # Called by the crawler to collect the description-page links found on a listing page
- # (a runnable sketch combining this with the description parser appears at the end of the file).
- # @param: soup object of the correct html page (a listing page)
- # @return: list of description links taken from the listing page
- def darkmatter_links_parser(soup):
-
- # Returning all links that should be visited by the Crawler
-
- href = []
- listing = soup.find('div', {"class": "content"}).findAll('td', {"class": "lefted", 'colspan': '3'})
-
- for a in listing:
- bae = a.find('a', href=True)
- link = bae['href']
- href.append(link)
-
- return href
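-
-
- # A minimal, self-contained sketch of how the crawler might drive these parsers end to end.
- # It assumes a listing page and a description page were already saved to disk; the file names
- # below are hypothetical placeholders, not paths used by the real crawler.
- if __name__ == "__main__":
- with open("darkmatter_listing.html", "r", encoding="utf-8") as f:
- listing_soup = BeautifulSoup(f.read(), "html.parser")
-
- # description links the crawler would visit next
- links = darkmatter_links_parser(listing_soup)
- print(len(links), "description links found")
-
- with open("darkmatter_description.html", "r", encoding="utf-8") as f:
- description_soup = BeautifulSoup(f.read(), "html.parser")
-
- # one description page parsed into its row of fields
- row = darkmatter_description_parser(description_soup)
- print("product:", row[3], "| USD price:", row[13])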