
Merge branch 'josh-development' into 'main'

Finished Crawler and Parser for ViceCity

See merge request dw9372422/dw_pipeline_test!3
main
westernmeadow, 1 year ago
commit 5045002a7b
10 changed files with 1631 additions and 949 deletions
  1.  +1    -1    .idea/DW_Pipeline_Test.iml
  2.  +1    -1    .idea/misc.xml
  3.  +1    -1    Forums/Utilities/utilities.py
  4.  +1002 -943  MarketPlaces/Initialization/geckodriver.log
  5.  +1    -1    MarketPlaces/Initialization/marketsList.txt
  6.  +3    -0    MarketPlaces/Initialization/markets_mining.py
  7.  +6    -1    MarketPlaces/Initialization/prepare_parser.py
  8.  +1    -1    MarketPlaces/Utilities/utilities.py
  9.  +333  -0    MarketPlaces/ViceCity/crawler_selenium.py
  10. +282  -0    MarketPlaces/ViceCity/parser.py

+1 -1   .idea/DW_Pipeline_Test.iml

@@ -2,7 +2,7 @@
 <module type="PYTHON_MODULE" version="4">
   <component name="NewModuleRootManager">
     <content url="file://$MODULE_DIR$" />
-    <orderEntry type="jdk" jdkName="C:\Users\Helium\anaconda3" jdkType="Python SDK" />
+    <orderEntry type="inheritedJdk" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
   <component name="PyNamespacePackagesService">


+1 -1   .idea/misc.xml

@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\Helium\anaconda3" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\John Wick\anaconda3" project-jdk-type="Python SDK" />
 </project>

+1 -1   Forums/Utilities/utilities.py

@@ -292,7 +292,7 @@ def cleanHTML(html):
     clean_html = re.sub(r"<embed.*scriptable?>", "", clean_html)
     # image and JavaScript
-    clean_html = re.sub(r"<div.*background-image.*?>", "", clean_html)
+    clean_html = re.sub(r"<div[^>]*style=\"[^\"]*background-image.*?>", "", clean_html)
     return clean_html
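The new pattern only strips a <div> when background-image appears inside its style="..." attribute, while the old one could swallow any div that merely mentioned the phrase. A quick standalone sketch of the difference (the sample strings are made up for illustration):

import re

html = '<div class="note">see background-image docs</div>'
styled = '<div style="background-image:url(x.png)">text</div>'

# old pattern: the greedy .* can run past the tag's closing '>' into the text,
# so a div that only mentions background-image is removed along with its contents
print(re.sub(r"<div.*background-image.*?>", "", html))        # -> '' (over-match)

# new pattern: requires background-image inside a style="..." attribute
new_pat = r"<div[^>]*style=\"[^\"]*background-image.*?>"
print(re.sub(new_pat, "", html))                              # -> unchanged
print(re.sub(new_pat, "", styled))                            # -> 'text</div>'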


+1002 -943   MarketPlaces/Initialization/geckodriver.log
(File diff suppressed because it is too large)


+1 -1   MarketPlaces/Initialization/marketsList.txt

@@ -1 +1 @@
-M00nkeyMarket
+ViceCity

+3 -0   MarketPlaces/Initialization/markets_mining.py

@@ -20,6 +20,7 @@ from MarketPlaces.Apocalypse.crawler_selenium import crawler as crawlerApocalypse
 from MarketPlaces.CityMarket.crawler_selenium import crawler as crawlerCityMarket
 from MarketPlaces.DarkMatter.crawler_selenium import crawler as crawlerDarkMatter
 from MarketPlaces.M00nkeyMarket.crawler_selenium import crawler as crawlerM00nkeyMarket
+from MarketPlaces.ViceCity.crawler_selenium import crawler as crawlerViceCity

 import configparser
 import time
@@ -106,6 +107,8 @@ if __name__ == '__main__':
         crawlerDarkMatter()
     elif mkt == "M00nkeyMarket":
         crawlerM00nkeyMarket()
+    elif mkt == "ViceCity":
+        crawlerViceCity()


+6 -1   MarketPlaces/Initialization/prepare_parser.py

@@ -10,6 +10,7 @@ from MarketPlaces.Tor2door.parser import *
 from MarketPlaces.Apocalypse.parser import *
 from MarketPlaces.ThiefWorld.parser import *
 from MarketPlaces.AnonymousMarketplace.parser import *
+from MarketPlaces.ViceCity.parser import *
 from MarketPlaces.TorBay.parser import *
 from MarketPlaces.M00nkeyMarket.parser import *
@@ -150,6 +151,8 @@ def new_parse(marketPlace, url, createLog):
             rmm = thiefWorld_description_parser(soup)
         elif marketPlace =="AnonymousMarketplace":
             rmm = anonymousMarketplace_description_parser(soup)
+        elif marketPlace == "ViceCity":
+            rmm = vicecity_description_parser(soup)
         elif marketPlace == "TorBay":
             rmm = torbay_description_parser(soup)
         elif marketPlace == "M00nkeyMarket":
@@ -161,7 +164,7 @@ def new_parse(marketPlace, url, createLog):
         # save file address with description record in memory
         detPage[key] = {'rmm': rmm, 'filename': os.path.basename(line2)}
-    except:
+    except :
         nError += 1
         print("There was a problem to parse the file " + line2 + " in the Description section!")
@@ -207,6 +210,8 @@ def new_parse(marketPlace, url, createLog):
             rw = thiefWorld_listing_parser(soup)
         elif marketPlace == "AnonymousMarketplace":
             rw = anonymousMarketplace_listing_parser(soup)
+        elif marketPlace == "ViceCity":
+            rw = vicecity_listing_parser(soup)
         elif marketPlace == "TorBay":
             rw = torbay_listing_parser(soup)
         elif marketPlace == "M00nkeyMarket":


+1 -1   MarketPlaces/Utilities/utilities.py

@@ -315,7 +315,7 @@ def cleanHTML(html):
     clean_html = re.sub(r"<embed.*scriptable?>", "", clean_html)
     # image and JavaScript
-    clean_html = re.sub(r"<div.*background-image.*?>", "", clean_html)
+    clean_html = re.sub(r"<div[^>]*style=\"[^\"]*background-image.*?>", "", clean_html)
     return clean_html


+333 -0   MarketPlaces/ViceCity/crawler_selenium.py

@@ -0,0 +1,333 @@
__author__ = 'DarkWeb'

'''
ViceCity Market Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.ViceCity.parser import vicecity_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1
baseURL = 'http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/'


# Opens Tor Browser, crawls the website, then parses, then closes Tor
# acts like the main method for the crawler; another function at the end of this code calls this function later
def startCrawling():
    opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, baseURL, False)


# Opens Tor Browser
# prompts for ENTER input to continue
def opentor():
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Returns the name of the website
# return: name of site in string type
def getMKTName():
    name = 'ViceCity'
    return name


# Return the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates FireFox 'driver' and configures its 'Profile'
# to use Tor proxy and socket
def createFFDriver():
    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # ff_prof.set_preference("permissions.default.image", 3)
    # ff_prof.set_preference("browser.download.folderList", 2)
    # ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    # ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver


# the driver 'gets' the url, attempting to access the site; if it can't, return 'down'
# return: the selenium driver or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Manual captcha solver: waits for a specific element so that the whole page loads, finds the input box,
# gets a screenshot of the captcha, then allows for manual solving of the captcha in the terminal
# @param: current selenium web driver
def login(driver):
    # wait for first captcha page to show up (This Xpath may need to change based on different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div/form/div/div[1]")))
    input("Press Enter once captcha done (don't press done)")

    # clicks button after captcha is inputted
    driver.find_element(by=By.XPATH, value='/html/body/div/div/form/button').click()

    # wait for login page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div/form')))

    # puts username into box
    userBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    userBox.send_keys('ct1234')

    # waits for second captcha to be entered by the user
    input("Press Enter once captcha done (don't press continue)")

    # clicks on continue
    driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/input[2]').click()

    # waits for password field to show
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div/form/div[3]/input')))
    time.sleep(10)  # give time for site to catch up

    # puts password into box
    passBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/div[2]/input')
    passBox.send_keys('DementedBed123-')
    driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/div[3]/input').click()

    # wait for pin input to show
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div/form/span')))
    pinBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/form/input[1]')
    pinBox.send_keys('12345')
    driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/form/input[2]').click()

    # waits for main listing page before crawling to ensure everything goes well
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div/div[2]')))


# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath


# Creates the file name from the passed URL, gives a distinct name if it can't be made unique after cleaning
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if (name == ''):
        name = str(counter)
        counter = counter + 1
    return name


# returns list of urls; here is where you can list the different urls of interest, the crawler runs through this list
# in this example, there are a couple of categories some threads fall under, such as
# Guides and Tutorials, Digital Products, and Software and Malware
# as you can see they are categories of products
def getInterestedLinks():
    links = []

    # Digital - Fraud Software, Has Hacking and Guides
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=150')
    # Digital - Guides and Tutorials
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=94')
    # Carding Services
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=155')
    # Digital - Other (half junk, half random stuff like: bots, rats, viruses, and guides)
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=153')

    return links


# gets links of interest to crawl through, iterates through the list, where each link is clicked and crawled through
# topic and description pages are crawled through here, where both types of pages are saved
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the ViceCity Market")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    count = 0
    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                list = productPages(html)
                j = 0
                for item in list:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    time.sleep(2.5)  # to let page catch up
                    savePage(driver.page_source, item)
                    time.sleep(2.5)  # so site doesn't crash
                    driver.back()

                    # comment out
                    # break

                    # # comment out
                    # if count == 1:
                    #     count = 0
                    #     break

                try:
                    temp = driver.find_element(by=By.CLASS_NAME, value='pagination')
                    link = temp.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print finalTime - initialTime

    input("Crawling ViceCity done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'listing' in url:
        return True
    return False


# Returns True if the link is a listingPage link
# @param: url of any url crawled
# return: true if is a Listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False


# calling the parser to define the links; the html is the source of a link from the interested link list
# @param: link from interested link list ie. getInterestingLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return vicecity_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing BestCardingWorld .... DONE!")
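A small standalone sketch of how the crawler's URL routing and file naming behave; the product URL shape below is only an assumption for illustration, since the real description links come from vicecity_links_parser:

# mirrors isListingLink/isDescriptionLink/getNameFromURL from the crawler above
def is_description_link(url):
    return 'listing' in url      # product/description pages

def is_listing_link(url):
    return 'category' in url     # category/listing pages

for url in ['http://52q.onion/?category=150',      # shortened stand-in for the real .onion address
            'http://52q.onion/listing/abc123']:    # hypothetical product link
    folder = 'Description' if is_description_link(url) else 'Listing'
    file_name = ''.join(e for e in url if e.isalnum()) + '.html'
    print(folder, '->', file_name)

# Listing -> http52qonioncategory150.html
# Description -> http52qonionlistingabc123.html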

+282 -0   MarketPlaces/ViceCity/parser.py

@@ -0,0 +1,282 @@
__author__ = 'DarkWeb'

import re

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page to each Product in the Listing Pages)
def vicecity_description_parser(soup):
    # Fields to be parsed

    vendor = "-1"            # 0 *Vendor_Name
    success = "-1"           # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"     # 2 Vendor_Rating
    name = "-1"              # 3 *Product_Name
    describe = "-1"          # 4 Product_Description
    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"          # 7 Product_Category
    views = "-1"             # 8 Product_Number_Of_Views
    reviews = "-1"           # 9 Product_Number_Of_Reviews
    rating_item = "-1"       # 10 Product_Rating
    addDate = "-1"           # 11 Product_AddedDate
    BTC = "-1"               # 12 Product_BTC_SellingPrice
    USD = "-1"               # 13 Product_USD_SellingPrice
    EURO = "-1"              # 14 Product_EURO_SellingPrice
    sold = "-1"              # 15 Product_QuantitySold
    left = "-1"              # 16 Product_QuantityLeft
    shipFrom = "-1"          # 17 Product_ShippedFrom
    shipTo = "-1"            # 18 Product_ShippedTo

    # Finding Product Name
    name = soup.find('div', {'class': "listing_info"}).find('div', {'class': "listing_right"})
    name = name.find('span', {'style': "font-size:18px;font-weight: bold;color: #fff"}).text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    # Finding Vendor
    vendor = soup.find('div', {'class': "listing_info"})
    vendor = vendor.find('div', {'class': "listing_right"})
    numbers = vendor.find('a').find('span').text
    vendor = vendor.find('a').text
    vendor = vendor.replace(numbers, "").strip()  # removes numbers at the end of vendor name

    # Finding Vendor Rating
    rating = soup.find('div', {'class': "listing_info"}).find('div', {'class': "listing_right"}).find('a').get('title')
    rating = re.search(r"\d+%", rating)
    rating_vendor = rating.group(0).strip()

    # Finding Quantity Sold and Left
    # temp = mb[4].text.split(',')
    #
    # sold = temp[0].replace("sold", "")
    # sold = sold.strip()
    #
    # left = temp[1].replace("in stock", "")
    # left = left.strip()

    # Finding Successful Transactions
    success = soup.find('div', {'class': "listing_info"}).find('div', {'class': "listing_right"}).find('a').get('title')
    success = re.search(r"\d+(?= sales)", success)
    success = success.group(0).strip()

    bae = soup.find('pre')

    # Finding USD
    USD = bae.find('span').text
    USD = re.search(r"\$\d+(?:\.\d+)?", USD).group(0)
    USD = USD.replace("$", "").strip()

    # Finding BTC
    BTC = bae.find_all('span')
    BTC = re.search(r"\d+(?:\.\d+)?", BTC[1].text).group(0)
    BTC = BTC.strip()

    # Finding the Product Category
    category = soup.find('div', {'class': "listing_info"}).find('div', {'class': "listing_right"})
    category = category.find('span', {'style': "font-size:15px;color: #a1a1a1"}).text
    category = category.replace("Category:", "").strip()

    li = bae.find_all('span')

    # Finding Shipment Information (Origin)
    shipFrom = li[-4].text.strip()

    # Finding Shipment Information (Destination)
    shipTo = li[-2].text.strip()

    # Finding the Product description
    describe = soup.find('p', {
        'style': "width:705px;margin-left:-305px;background-color: #242424;border-radius: 3px;border: 1px solid #373737;padding: 5px;"}).text
    describe = describe.replace("\n", " ")
    describe = describe.strip()

    # Finding the Number of Product Reviews
    li = soup.find_all('label', {'class': "tc_label threetabs"})
    review = li[1].text
    review = re.search(r"\d+", review)
    if review:
        reviews = review.group(0).strip()
    else:
        reviews = '0'

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile('MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row


# This is the method to parse the Listing Pages
def vicecity_listing_parser(soup):
    # Fields to be parsed
    nm = 0                   # *Total_Products (Should be Integer)
    mktName = "ViceCity"     # 0 *Marketplace_Name
    vendor = []              # 1 *Vendor y
    rating_vendor = []       # 2 Vendor_Rating
    success = []             # 3 Vendor_Successful_Transactions
    name = []                # 4 *Product_Name y
    CVE = []                 # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                  # 6 Product_MS_Classification (Microsoft Security)
    category = []            # 7 Product_Category y
    describe = []            # 8 Product_Description
    views = []               # 9 Product_Number_Of_Views
    reviews = []             # 10 Product_Number_Of_Reviews
    rating_item = []         # 11 Product_Rating
    addDate = []             # 12 Product_AddDate
    BTC = []                 # 13 Product_BTC_SellingPrice
    USD = []                 # 14 Product_USD_SellingPrice y
    EURO = []                # 15 Product_EURO_SellingPrice
    sold = []                # 16 Product_QuantitySold
    qLeft = []               # 17 Product_QuantityLeft
    shipFrom = []            # 18 Product_ShippedFrom
    shipTo = []              # 19 Product_ShippedTo
    href = []                # 20 Product_Links

    listing = soup.findAll('div', {"class": "wLf"})  # should be 30

    # Populating the Number of Products
    nm = len(listing)

    # # Finding Category
    # cat = soup.find("div", {"class": "col-9"})
    # cat = cat.find("h2").text
    # cat = cat.replace("Category: ", "")
    # cat = cat.replace(",", "")
    # cat = cat.strip()

    for a in listing:
        # category.append(cat)
        # bae = card.findAll('a')

        # Adding the url to the list of urls
        link = a.find('div', {"class": "wLfLeft"}).find('a', href=True).get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product Name
        product = a.find('div', {"class": "wLfName"}).find('a').text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # Finding the Vendor
        vendor_name = a.find('div', {"class": "wLfVendor"}).find('a').text
        addedNums = a.find('div', {"class": "wLfVendor"}).find('a').find('span').text  # finds numbers added at end
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.replace(addedNums, "")  # removes numbers added at end
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding Prices
        price = a.find('div', {"class": "wLfPrice"}).find_all('span')
        ud = price[0].text.replace(" USD", " ")
        # u = ud.replace("$","")
        ud = ud.replace(",", "")
        u = ud.replace(price[1].text, "")
        u = u.strip()
        USD.append(u)
        bc = price[1].text
        bc = re.search(r"\d+(?:\.\d+)?", bc).group(0).strip()
        BTC.append(bc)

        # # Finding Reviews
        # num = card.find("span", {"class": "rate-count"}).text
        # num = num.replace("(", "")
        # num = num.replace("review)", "")
        # num = num.replace("reviews)", "")
        # num = num.strip()
        # reviews.append(num)

        # Finding Successful Transactions
        freq = a.find('div', {"class": "wLfVendor"}).find('a').get('title')
        freq = re.search(r'\d+(?= sales)', freq).group(0)
        freq = freq.strip()
        success.append(freq)

        # Finding Ship from and ship to
        place = a.find('div', {"class": "wLfPrice"})
        place = place.find('span', {'style': "font-size: 12px;"}).text
        place = place.split('')
        varFrom = place[0].strip()
        varTo = place[1].strip()
        if varFrom == "WW":
            varFrom = "Worldwide"
        if varTo == "WW":
            varTo = "Worldwide"
        shipFrom.append(varFrom)
        shipTo.append(varTo)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile('MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)


def vicecity_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []
    listing = soup.findAll('div', {"class": "wLf"})

    for a in listing:
        bae = a.find('div', {"class": "wLfLeft"}).find('a', href=True)
        link = bae['href']
        href.append(link)

    return href
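As wired into prepare_parser.py above, each of these functions takes a BeautifulSoup tree built from a saved ViceCity page. A minimal manual smoke test against one saved listing page could look like the sketch below; the file path is hypothetical, since real pages land under the shared HTML_Pages tree written by the crawler:

from bs4 import BeautifulSoup
from MarketPlaces.ViceCity.parser import vicecity_listing_parser, vicecity_links_parser

# Example path only; substitute a page saved by the ViceCity crawler.
with open('sample_vicecity_listing.html', 'r', encoding='utf-8') as f:
    soup = BeautifulSoup(f.read(), 'html.parser')

print(vicecity_links_parser(soup))    # description URLs the crawler would visit next
print(vicecity_listing_parser(soup))  # the organizeProducts() rows for this page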
