
updated utilities.py to fix bug with clean_html()

Branch: main
Joshua, 1 year ago
commit db93632843
6 changed files with 236 additions and 4 deletions
  1. .idea/DW_Pipeline_Test.iml (+1, -1)
  2. .idea/misc.xml (+1, -1)
  3. Forums/Utilities/utilities.py (+1, -1)
  4. MarketPlaces/Utilities/utilities.py (+1, -1)
  5. MarketPlaces/ViceCity/crawler_selenium.py (+0, -0)
  6. MarketPlaces/ViceCity/parser.py (+232, -0)

.idea/DW_Pipeline_Test.iml (+1, -1)

@@ -2,7 +2,7 @@
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="C:\Users\Helium\anaconda3" jdkType="Python SDK" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyNamespacePackagesService">


.idea/misc.xml (+1, -1)

@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\Helium\anaconda3" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\John Wick\anaconda3" project-jdk-type="Python SDK" />
 </project>

Forums/Utilities/utilities.py (+1, -1)

@@ -292,7 +292,7 @@ def cleanHTML(html):
clean_html = re.sub(r"<embed.*scriptable?>", "", clean_html)
# image and JavaScript
clean_html = re.sub(r"<div.*background-image.*?>", "", clean_html)
clean_html = re.sub(r"<div[^>]*style=\"[^\"]*background-image.*?>", "", clean_html)
return clean_html


MarketPlaces/Utilities/utilities.py (+1, -1)

@@ -315,7 +315,7 @@ def cleanHTML(html):
clean_html = re.sub(r"<embed.*scriptable?>", "", clean_html)
# image and JavaScript
clean_html = re.sub(r"<div.*background-image.*?>", "", clean_html)
clean_html = re.sub(r"<div[^>]*style=\"[^\"]*background-image.*?>", "", clean_html)
return clean_html
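
The same one-line fix lands in both utilities.py copies: the new pattern only strips a <div> whose style attribute actually sets a background-image, instead of letting the greedy ".*" run across unrelated markup. A minimal sketch of the difference (not part of the commit; the sample HTML is made up):

import re

html = '<div class="post">keep me</div> <div style="background-image: url(x.png)">drop me'

# Old pattern: the greedy ".*" starts at the first <div> and swallows
# everything up to the background-image tag, deleting unrelated markup.
print(re.sub(r"<div.*background-image.*?>", "", html))
# -> 'drop me'

# New pattern: the match is confined to a single tag ([^>]*) whose style
# attribute contains "background-image".
print(re.sub(r"<div[^>]*style=\"[^\"]*background-image.*?>", "", html))
# -> '<div class="post">keep me</div> drop me'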


MarketPlaces/ViceCity/crawler_selenium.py (+0, -0)


MarketPlaces/ViceCity/parser.py (+232, -0)

@@ -0,0 +1,232 @@
__author__ = 'DarkWeb'

import re  # used below for the CVE/MS regular expressions

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page to each Product in the Listing Pages)
def tor2door_description_parser(soup):

    # Fields to be parsed
    vendor = "-1"            # 0 *Vendor_Name
    success = "-1"           # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"     # 2 Vendor_Rating
    name = "-1"              # 3 *Product_Name
    describe = "-1"          # 4 Product_Description
    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"          # 7 Product_Category
    views = "-1"             # 8 Product_Number_Of_Views
    reviews = "-1"           # 9 Product_Number_Of_Reviews
    rating_item = "-1"       # 10 Product_Rating
    addDate = "-1"           # 11 Product_AddedDate
    BTC = "-1"               # 12 Product_BTC_SellingPrice
    USD = "-1"               # 13 Product_USD_SellingPrice
    EURO = "-1"              # 14 Product_EURO_SellingPrice
    sold = "-1"              # 15 Product_QuantitySold
    left = "-1"              # 16 Product_QuantityLeft
    shipFrom = "-1"          # 17 Product_ShippedFrom
    shipTo = "-1"            # 18 Product_ShippedTo

    bae = soup.find('div', {'class': "col-9"})

    # Finding Product Name
    name = bae.find('h2').text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    mb = bae.findAll('div', {"class": "mb-1"})

    # Finding Vendor
    vendor = mb[0].text
    vendor = vendor.replace(",", "")
    vendor = vendor.replace("Sold by:", "")
    vendor = vendor.strip()

    # # Finding Vendor Rating
    # full_stars = bae[2].find_all('i', {'class': "fas fa-star"})
    # half_star = bae[2].find('i', {'class': "fas fa-star-half-alt"})
    # rating = len(full_stars) + (0.5 if half_star is not None else 0)

    # Finding Quantity Sold and Left
    temp = mb[4].text.split(',')

    sold = temp[0].replace("sold", "")
    sold = sold.strip()

    left = temp[1].replace("in stock", "")
    left = left.strip()

    # Finding USD
    USD = bae.find('div', {"class": "h3 text-secondary"}).text
    USD = USD.replace("$", "")
    USD = USD.strip()

    # Finding BTC
    temp = bae.find('div', {"class": "small"}).text.split("BTC")
    BTC = temp[0].strip()

    # shipping_info = bae[4].text
    # if "Digital" not in shipping_info:
    #     shipping_info = shipping_info.split(" ")
    #
    #     # Finding Shipment Information (Origin)
    #     shipFrom = shipping_info[0].strip()
    #
    #     # Finding Shipment Information (Destination)
    #     shipTo = shipping_info[1].strip()

    # Finding the Product description
    describe = bae.find('div', {"class": "card border-top-0"}).text
    describe = describe.replace("\n", " ")
    describe = describe.replace("\r", " ")
    describe = describe.strip()

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')

    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row
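
For reference, a minimal smoke test for the description parser (not part of the commit, and assuming the function above is in scope). The HTML fragment and its values are hypothetical; it only mimics the selectors used above (col-9, mb-1, h3 text-secondary, small, card border-top-0):

from bs4 import BeautifulSoup

sample = """
<div class="col-9">
  <h2>Example Product</h2>
  <div class="mb-1">Sold by: example_vendor</div>
  <div class="mb-1"></div>
  <div class="mb-1"></div>
  <div class="mb-1"></div>
  <div class="mb-1">12 sold, 34 in stock</div>
  <div class="h3 text-secondary">$19.99</div>
  <div class="small">0.0007 BTC</div>
  <div class="card border-top-0">A short description.</div>
</div>
"""

row = tor2door_description_parser(BeautifulSoup(sample, "html.parser"))
print(row[0], row[3], row[13], row[15], row[16])
# -> example_vendor Example Product 19.99 12 34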


# This is the method to parse the Listing Pages
def tor2door_listing_parser(soup):

    # Fields to be parsed
    nm = 0                   # *Total_Products (Should be Integer)
    mktName = "Tor2door"     # 0 *Marketplace_Name
    vendor = []              # 1 *Vendor y
    rating_vendor = []       # 2 Vendor_Rating
    success = []             # 3 Vendor_Successful_Transactions
    name = []                # 4 *Product_Name y
    CVE = []                 # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                  # 6 Product_MS_Classification (Microsoft Security)
    category = []            # 7 Product_Category y
    describe = []            # 8 Product_Description
    views = []               # 9 Product_Number_Of_Views
    reviews = []             # 10 Product_Number_Of_Reviews
    rating_item = []         # 11 Product_Rating
    addDate = []             # 12 Product_AddDate
    BTC = []                 # 13 Product_BTC_SellingPrice
    USD = []                 # 14 Product_USD_SellingPrice y
    EURO = []                # 15 Product_EURO_SellingPrice
    sold = []                # 16 Product_QuantitySold
    qLeft = []               # 17 Product_QuantityLeft
    shipFrom = []            # 18 Product_ShippedFrom
    shipTo = []              # 19 Product_ShippedTo
    href = []                # 20 Product_Links

    listing = soup.findAll('div', {"class": "card product-card mb-3"})

    # Populating the Number of Products
    nm = len(listing)

    # Finding Category
    cat = soup.find("div", {"class": "col-9"})
    cat = cat.find("h2").text
    cat = cat.replace("Category: ", "")
    cat = cat.replace(",", "")
    cat = cat.strip()

    for card in listing:
        category.append(cat)

        bae = card.findAll('a')

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

        # Finding Product Name
        product = bae[1].text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.strip()
        name.append(product)

        # Finding Vendor
        vendor_name = bae[2].text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding USD
        usd = card.find('div', {"class": "mb-1"}).text
        usd = usd.replace("$", "")
        usd = usd.strip()
        USD.append(usd)

        # Finding Reviews
        num = card.find("span", {"class": "rate-count"}).text
        num = num.replace("(", "")
        num = num.replace("review)", "")
        num = num.replace("reviews)", "")
        num = num.strip()
        reviews.append(num)

        # Searching for CVE and MS categories
        cve = card.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = card.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
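
The CVE/MS branches in both parsers build one space-separated string out of every matched text node. An isolated sketch of that accumulation (the sample strings are made up):

import re

texts = ["Exploit for CVE-2021-1234,\n", "also CVE-2020-0001"]
cve = [t for t in texts if re.search(r'CVE-\d{4}-\d{4}', t)]

cee = " "
for idx in cve:
    cee += idx
    cee += " "
    cee = cee.replace(',', ' ')   # commas would break the delimited row format
    cee = cee.replace('\n', '')   # flatten multi-line matches
print(repr(cee))
# -> ' Exploit for CVE-2021-1234  also CVE-2020-0001 '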


def tor2door_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('div', {"class": "card product-card mb-3"})

    for div in listing:
        link = div.find('a')['href']
        href.append(link)

    return href
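
And a matching sketch for the links parser, again with made-up markup that only mirrors the "card product-card mb-3" selector:

from bs4 import BeautifulSoup

sample = """
<div class="card product-card mb-3"><a href="/product/1">Item one</a></div>
<div class="card product-card mb-3"><a href="/product/2">Item two</a></div>
"""

print(tor2door_links_parser(BeautifulSoup(sample, "html.parser")))
# -> ['/product/1', '/product/2']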
