
Dark Market Crawler and Parser

Branch: main
Isabelle Wang committed 6 months ago
commit 02adf827cb
2 changed files with 659 additions and 0 deletions
  1. +383 -0   MarketPlaces/SilkRoad4/crawler_selenium.py
  2. +276 -0   MarketPlaces/SilkRoad4/parser.py

+383 -0   MarketPlaces/SilkRoad4/crawler_selenium.py

@@ -0,0 +1,383 @@
__author__ = 'Helium'
'''
Silk Road 4 Crawler (Selenium)
'''
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.SilkRoad4.parser import silkroad4_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML
counter = 1
baseURL = 'http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/'
# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it.
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
# return: name of site in string type
def getMKTName():
    name = 'SilkRoad4'
    return name


# Returns the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
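    # NOTE: 9150 is the SOCKS port exposed by the Tor Browser bundle; a standalone
    # tor daemon typically listens on 9050 instead.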
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
# The driver 'gets' the url and attempts to access the site; returns 'down' if it cannot
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Manual captcha solver: waits for a specific element so that the whole page loads, fills in the login
# input boxes, then allows for manual solving of the captcha in the terminal.
# If the CAPTCHA is entered incorrectly the first time, the username and password are re-entered automatically after pressing 'Enter'.
# @param: current selenium web driver
def login(driver):
    # login urls
    login_page1 = driver.current_url
    login_page2 = 'http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?login'

    # Will automatically re-enter the user/pw if the CAPTCHA is entered incorrectly the first time.
    while driver.current_url == login_page1 or driver.current_url == login_page2:
        # entering username and password into input boxes
        usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/form/div/input[1]')
        # Username here
        usernameBox.send_keys('SamarIsland')
        passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/form/div/input[2]')
        # Password here
        passwordBox.send_keys('Underthe5e@')

        input("Press ENTER if:\n- the username and password need to be re-entered\n- the CAPTCHA is completed and the home page is visible\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located((By.XPATH, '/html/body')))
# Saves the crawled html page, creating the directory path for html pages if it does not exist
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as the crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath


# Creates the file name from the passed URL; falls back to a counter-based name if the cleaned URL is empty
# @param: raw url as the crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
# Returns the list of urls of interest; the crawler runs through this list.
# Each url is a product category page (e.g. Hacking and Spam, Fraud, Guides & Tutorials, Malware,
# Software, Services, Security and Hosting); most categories are commented out.
def getInterestedLinks():
    links = []

    # Hacking and Spam Category
    # Databases, Access, & Data
    links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=12')
    # Exploit Kits
    links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=13')
    # Exploits, Kits, & Vulnerabilities
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=11')
    # Hacking Tools & Scripts
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=14')
    # Other Hacking & Spam
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=21')
    # Phishing Tools & Utilities
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=19')
    # RDPs/VNCs
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=18')
    # Social Media Boosting
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=20')
    # Spam Tools & Scripts
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=15')
    # Traffic
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=16')

    # Fraud
    # Accounts & Bank Drops
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=2')
    # CVV & Cards
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=3')
    # Carding/Fraud Software
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=8')
    # Other Fraud
    links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=9')
    # Personal Info (SSN/DOB/Fullz/Scans)
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=5')
    # Physical Fake IDs
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=7')
    # Dumps
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=4')
    # Skimmers
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=6')

    # Guides & Tutorials
    # Fraud Guides
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=137')
    # Hacking Guides
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=136')
    # Other Guides and Tutorials
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=143')
    # Security and Anonymity Guides
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=140')
    # Social Engineering Guides
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=141')

    # Malware
    # Keyloggers
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=30')
    # Botnets
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=23')
    # RATs
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=24')

    # Software
    # Other Software
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=150')
    # Security Software
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=145')
    # Cracked Products
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=149')

    # Services
    # Hacking
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=112')
    # Social Engineering
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=116')
    # Security
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=113')
    # Programming
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=115')
    # Carding
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=114')

    # Security and Hosting
    # VPN
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=129')
    # VPN Accounts
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=130')
    # Proxies
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=132')
    # Bulletproof Hosting
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=126')
    # Hosting
    # links.append('http://silkroadm5oqzuz3yq7pvwvinvyzeg2wnpnw3t4cyrsggrcgqfy2ptid.onion/?cat=125')

    return links
# Iterates through the list of links of interest; each link is opened and crawled.
# Both listing and description pages are crawled and saved here.
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the SilkRoad4 market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, linksToCrawl[i] + f"page{count+1}")

                list = productPages(html)

                countItem = 0
                for item in list:
                    countItem += 1
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out the next two lines to crawl every item on a listing page
                    if countItem == 10:
                        break

                # comment out the next two lines to crawl every listing page of a category
                if count == 2:
                    break

                try:
                    link = driver.find_element(by=By.XPATH, value='/html/body/div/div/table/tbody/tr/td[1]/center/form/input[3]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the SilkRoad4 market done.")
# Returns 'True' if the link is a description link
# @param: url of any page crawled
# return: True if it is a description page, False if not
def isDescriptionLink(url):
    if '?listing=' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
# @param: url of any page crawled
# return: True if it is a listing page, False if not
def isListingLink(url):
    if '?cat=' in url:
        return True
    return False


# Calls the parser to extract the description links from the html of a listing page
# @param: html source of a page from the interested link list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return silkroad4_links_parser(soup)


def crawler():
    startCrawling()
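A minimal sanity-check sketch (not part of the committed file) of how the helper predicates above decide whether a saved page lands in the Listing or Description folder; the example query strings are illustrative.

# Minimal sketch, not part of the committed file; the example query strings are illustrative.
listing_url = baseURL + '?cat=12'
product_url = baseURL + '?listing=42'
print(isListingLink(listing_url), isDescriptionLink(listing_url))   # True False
print(isListingLink(product_url), isDescriptionLink(product_url))   # False True
print(getNameFromURL(product_url))   # alphanumeric-only filename stem used by savePage()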

+276 -0   MarketPlaces/SilkRoad4/parser.py

@@ -0,0 +1,276 @@
__author__ = 'DarkWeb'
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# This is the method to parse the Description Pages (one page for each Product in the Listing Pages)
def silkroad4_description_parser(soup):
    # Fields to be parsed
    vendor = "-1"            # 0 *Vendor_Name
    success = "-1"           # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"     # 2 Vendor_Rating
    name = "-1"              # 3 *Product_Name
    describe = "-1"          # 4 Product_Description
    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"          # 7 Product_Category
    views = "-1"             # 8 Product_Number_Of_Views
    reviews = "-1"           # 9 Product_Number_Of_Reviews
    rating_item = "-1"       # 10 Product_Rating
    addDate = "-1"           # 11 Product_AddedDate
    BTC = "-1"               # 12 Product_BTC_SellingPrice
    USD = "-1"               # 13 Product_USD_SellingPrice
    EURO = "-1"              # 14 Product_EURO_SellingPrice
    sold = "-1"              # 15 Product_QuantitySold
    left = "-1"              # 16 Product_QuantityLeft
    shipFrom = "-1"          # 17 Product_ShippedFrom
    shipTo = "-1"            # 18 Product_ShippedTo
    LTC = "-1"               # Product_LTC_SellingPrice (not stored in the database yet)
    XMR = "-1"               # Product_XMR_SellingPrice (not stored in the database yet)
    image = "-1"             # 19 Product_Image
    vendor_image = "-1"      # 20 Vendor_Image

    bae = soup.find('div', {'id': 'cats'})

    # Finding Vendor
    vendor = bae.find('font', {'color': 'blue'}).text
    vendor = vendor.strip()

    # Finding Product Name
    name = bae.find('b', {'style': 'color:#333;'}).text
    name = name.strip()

    # Finding image
    image = bae.find('img')
    image = image.get('src')
    image = image.split('base64,')[-1]

    # Finding Price
    temp = bae.find('span').next_sibling
    price_list = temp.split("/")
    USD = price_list[0].replace("$", "").strip()
    BTC = price_list[1].replace("BTC", "").strip()
    # Not extracted into the database/PGAdmin yet!
    LTC = price_list[2].replace("LTC", "").strip()
    XMR = price_list[3].replace("XMR", "").strip()
    # print(USD, "", BTC, "", LTC, "", XMR)

    # Finding Ships From
    a = bae.find_all('a', href=True, limit=2)
    shipFrom = a[0].text.strip()

    # Finding Category
    category = a[1].text.strip()

    # Finding the Product description
    describe = soup.find('div', {'style': 'color:#555;font-weight:normal;font-size:12px'}).text
    describe = cleanString(describe.strip())

    # Finding Rating
    rate = soup.find('div', {'style': 'padding:0px; margin-bottom:10px; font-size:12px;'})
    rate = rate.find('p')
    if rate is not None:
        rate = rate.text.strip()
        # Some descriptions show a zero rating as 'No rating yet'; this could be converted to -1 for consistency in the database
        # if rate == 'No rating yet':
        #     rating_item = -1

        # Only extract the rating part
        rating_item = rate[:rate.index('Note')]

    # Finding Number of Reviews
    table = soup.find('div', {'class': 'table-responsive'})
    if table is not None:
        num_rev = table.findAll('tr')
        reviews = len(num_rev) - 1

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
# This is the method to parse the Listing Pages
def silkroad4_listing_parser(soup):
    # Fields to be parsed
    nm = 0                      # *Total_Products (Should be Integer)
    mktName = "SilkRoad4"       # 0 *Marketplace_Name
    vendor = []                 # 1 *Vendor y
    rating_vendor = []          # 2 Vendor_Rating
    success = []                # 3 Vendor_Successful_Transactions
    name = []                   # 4 *Product_Name y
    CVE = []                    # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                     # 6 Product_MS_Classification (Microsoft Security)
    category = []               # 7 Product_Category y
    describe = []               # 8 Product_Description
    views = []                  # 9 Product_Number_Of_Views
    reviews = []                # 10 Product_Number_Of_Reviews
    rating_item = []            # 11 Product_Rating
    addDate = []                # 12 Product_AddDate
    BTC = []                    # 13 Product_BTC_SellingPrice
    USD = []                    # 14 Product_USD_SellingPrice y
    EURO = []                   # 15 Product_EURO_SellingPrice
    sold = []                   # 16 Product_QuantitySold
    qLeft = []                  # 17 Product_QuantityLeft
    shipFrom = []               # 18 Product_ShippedFrom
    shipTo = []                 # 19 Product_ShippedTo
    href = []                   # 20 Product_Links
    LTC = []                    # Product_LTC_SellingPrice (not stored in the database yet)
    XMR = []                    # Product_XMR_SellingPrice (not stored in the database yet)
    image = []                  # 21 Product_Image
    image_vendor = []           # 22 Vendor_Image

    listing = soup.findAll('div', {'style': "padding:10px; width:100%; margin-bottom:5px; margin-top:0px"})

    # Populating the Number of Products
    nm = len(listing)

    # Finding the category of the listing page
    cat = listing[0].find_all('a', href=True)
    cat = cat[len(cat)-2].text
    cat = cat.replace(",", "")
    cat = cat.strip()

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the category
        category.append(cat)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding Price
        temp = a.find('b', {'style': 'color:#333'})
        temp = temp.text
        price = temp.split("/")
        USD.append(price[0].replace('$', '').strip())
        BTC.append(price[1].replace('BTC', '').strip())
        # LTC and XMR will not be stored in PGAdmin as of now
        LTC.append(price[2].replace('LTC', '').strip())
        XMR.append(price[3].replace('XMR', '').strip())
        # print(USD, " ", BTC, ' ', LTC, '', XMR)

        # Finding the Vendor
        ven = a.findAll('b')
        v = ven[2].find('font').text
        v = v.strip()
        vendor.append(v)

        # Finding the Product
        product = bae[0].text
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # Finding ShipFrom
        shipf = bae[len(bae)-1].text.strip()
        shipFrom.append(shipf)

        # Finding image
        product_image = a.find('img')
        product_image = product_image.get('src')
        product_image = product_image.split('base64,')[-1]
        image.append(product_image)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
def silkroad4_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    # finds all divs with id 'vp'
    divs = soup.findAll('div', {"id": "vp"})

    listing = []
    # for each div with id 'vp', find the first a tag that has an href
    for div in divs:
        a_s = div.find('a', href=True)
        # if the div has no a tag with an href, move on; otherwise, store the a tag in listing
        if a_s is not None:
            listing.append(a_s)

    # loop through the collected a tags and extract their hrefs
    for a in listing:
        link = a['href']
        # if '?listing' is in the link, it is a product page, so append it to the href list
        if "?listing" in link:
            href.append(link)

    return href
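A minimal sketch (not part of the committed file) of how silkroad4_links_parser can be exercised against a previously saved listing page; the file path below is hypothetical.

# Minimal sketch, not part of the committed file; the file path is hypothetical.
from bs4 import BeautifulSoup

with open('HTML_Pages/2024-01-01/Listing/samplecat12.html', encoding='utf-8') as f:
    soup = BeautifulSoup(f.read(), 'html.parser')

for link in silkroad4_links_parser(soup):
    print(link)   # relative '?listing=' links; the crawler joins these with baseURL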
