Finished HiddenMarket Crawler and Parser and fixed clean_html() bug

See merge request dw9372422/dw_pipeline_test!4
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-<component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\Helium\anaconda3" project-jdk-type="Python SDK" />
+<component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\John Wick\anaconda3" project-jdk-type="Python SDK" />
 </project>
@@ -0,0 +1,324 @@
__author__ = 'DarkWeb'

'''
HiddenMarket Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.HiddenMarket.parser import hiddenmarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/'
# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    marketName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(marketName, baseURL, False)
# Opens Tor Browser
def opentor():
    from MarketPlaces.Initialization.markets_mining import config
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
# Login using premade account credentials and do login captcha manually
def login(driver):
    # wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[3]/div[3]")))

    # entering username and password into input boxes
    # usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    # usernameBox.send_keys('ct1234')
    # passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    # passwordBox.send_keys('DementedBed1230')

    '''
    # wait for the captcha page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/main/div/div/div/div/div/form/div[3]/div/div[1]/label/img")))

    # save captcha to local
    driver.find_element(by=By.XPATH, value='/html/body/main/div/div/div/div/div/form/div[3]/div/div[1]/label/img').screenshot(
        r'..\captcha.png')

    # this method will show the image in any image viewer
    im = Image.open(r'..\captcha.png')
    im.show()

    # wait until the input space shows up
    inputBox = driver.find_element(by=By.XPATH, value='//*[@id="captcha"]')

    # ask the user to input the captcha solution in the terminal
    userIn = input("Enter solution: ")

    # send the user's solution into the input space
    inputBox.send_keys(userIn)

    # click the verify (submit) button
    driver.find_element(by=By.XPATH, value="/html/body/main/div/div/div/div/div/form/div[4]/button").click()
    '''

    # input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed URL)
    # WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
    #     (By.XPATH, '/html/body/main/div/div/div[1]/div/div[1]/div/h5')))
# Returns the name of the website
def getMKTName():
    name = 'HiddenMarket'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/'
    return url
# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.quit()
    time.sleep(3)
    return
# Creates FireFox 'driver' and configures its 'Profile'
# to use Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # ff_prof.set_preference("permissions.default.image", 3)
    # ff_prof.set_preference("browser.download.folderList", 2)
    # ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    # ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
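    # Note: 9150 is the SOCKS port exposed by the Tor Browser bundle;
    # a standalone tor daemon typically listens on 9050 instead.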
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        driver.close()
        return 'down'
# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:  # close the handle instead of leaking it
        file.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        # let os.path.join supply the separators instead of hard-coding escaped backslashes
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
# Creates the file name from passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
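
# For example, a URL like 'http://example.onion/category/hacks' (hypothetical)
# collapses to 'httpexampleonioncategoryhacks'; a URL with no alphanumeric
# characters falls back to the global counter.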
def getInterestedLinks():
    links = []

    # Civil Software
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/civil_softwares')
    # Tutorials - Carding
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/carding')
    # Digital - Hacks
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/hacks')
    # Digital - Exploit Kit
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/exploit_kit')
    # 0Day
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/0day')
    # Digital Forensics
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/digital_forensics')
    # Tutorials - Mining
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/mining')
    # Tutorials - Worms
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/worms')
    # Tutorials - Viruses
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/viruses')
    # Tutorials - Trojans
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/trojans')
    # Tutorials - Botnets
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/botnets')

    return links
def crawlForum(driver):
    print("Crawling the HiddenMarket market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        categoryLink = link
        maxNumPages = 0  # temp value
        try:
            has_next_page = True
            count = 0
            pageCount = 1
            while has_next_page:
                try:
                    driver.get(link)
                    if pageCount == 1:
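                        # the pager's last '.page' element holds the highest page number for the category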
                        maxNumPages = int(driver.find_element(by=By.CLASS_NAME, value='main')
                                          .find_element(by=By.CLASS_NAME, value='pages')
                                          .find_elements(by=By.CLASS_NAME, value='page')[-1].text)
                except Exception:
                    driver.refresh()
                html = driver.page_source
                savePage(html, link)

                links = productPages(html)  # renamed from 'list' to avoid shadowing the builtin
                for item in links:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except Exception:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                    # comment out
                    # break

                # comment out
                # if count == 2:
                #     break

                try:
                    pageCount += 1
                    if pageCount > maxNumPages:
                        raise NoSuchElementException
                    pageLink = "/" + str(pageCount) + "/"
                    link = categoryLink + pageLink
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    input("Crawling HiddenMarket market done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a Topic link
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns True if the link is a listingPage link
def isListingLink(url):
    if 'category' in url:
        return True
    return False


# calling the parser to define the links
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return hiddenmarket_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing HiddenMarket .... DONE!")
@@ -0,0 +1,280 @@
__author__ = 'DarkWeb'

import re  # used for the CVE/MS pattern searches below, rather than relying on the wildcard import

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# This is the method to parse the Description Pages (one page to each Product in the Listing Pages)
def hiddenmarket_description_parser(soup):
    # Fields to be parsed (numbered in the order they appear in the returned row)

    vendor = "-1"  # 0 *Vendor_Name
    rating_vendor = "-1"  # 1 Vendor_Rating
    success = "-1"  # 2 Vendor_Successful_Transactions
    name = "-1"  # 3 *Product_Name
    describe = "-1"  # 4 Product_Description
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"  # 7 Product_Category
    views = "-1"  # 8 Product_Number_Of_Views
    reviews = "-1"  # 9 Product_Number_Of_Reviews
    rating_item = "-1"  # 10 Product_Rating
    addDate = "-1"  # 11 Product_AddedDate
    BTC = "-1"  # 12 Product_BTC_SellingPrice
    USD = "-1"  # 13 Product_USD_SellingPrice
    EURO = "-1"  # 14 Product_EURO_SellingPrice
    sold = "-1"  # 15 Product_QuantitySold
    left = "-1"  # 16 Product_QuantityLeft
    shipFrom = "-1"  # 17 Product_ShippedFrom
    shipTo = "-1"  # 18 Product_ShippedTo

    bae = soup.find('div', {'class': "main"})
    # Finding Product Name
    name = bae.find('div', {'class': "heading"}).text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    mb = bae.find('div', {'class': "information"}).findAll('tr')

    # Finding Vendor
    vendor = mb[1].find('a').text
    vendor = vendor.replace(",", "")
    vendor = vendor.strip()

    # # Finding Vendor Rating
    # full_stars = bae[2].find_all('i', {'class': "fas fa-star"})
    # half_star = bae[2].find('i', {'class': "fas fa-star-half-alt"})
    # rating = len(full_stars) + (0.5 if half_star is not None else 0)

    # Finding Quantity Left
    temp = mb[-3].text
    left = temp.replace("Quantity in stock:", "")
    left = left.strip()

    # Finding USD
    USD = mb[0].text
    USD = USD.replace("Price:", "")
    USD = USD.replace("USD", "")
    USD = USD.strip()

    # Finding BTC
    # temp = bae.find('div', {"class": "small"}).text.split("BTC")
    # BTC = temp[0].strip()

    # Finding Shipment Information (Origin)
    shipFrom = mb[2].text
    shipFrom = shipFrom.replace("Seller location:", "")
    shipFrom = shipFrom.strip()

    # Finding Shipment Information (Destination)
    shipTo = mb[3].text
    shipTo = shipTo.replace("Ships to (seller):", "")
    shipTo = shipTo.strip()

    # Finding the Product description
    describe = bae.find('div', {"class": "twotabs"}).find('div', {'class': "tab1"}).text
    describe = describe.replace("\n", " ")
    describe = describe.replace("\r", " ")
    describe = describe.replace("-", " ")
    describe = describe.strip()

    # Finding the Product Category
    category = mb[-4].text
    category = category.replace("Category:", "")
    category = category.strip()

    # Finding the number of reviews
    reviews = bae.find_all('div', {'class': "heading"})
    reviews = reviews[-2].text
    reviews = reviews.replace("Comments (", "")
    reviews = reviews.replace(")", "")
    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))  # raw strings avoid invalid-escape warnings
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')
    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row
# This is the method to parse the Listing Pages
def hiddenmarket_listing_parser(soup):
    # Fields to be parsed
    nm = 0  # *Total_Products (Should be Integer)
    mktName = "HiddenMarket"  # 0 *Marketplace_Name
    vendor = []  # 1 *Vendor y
    rating_vendor = []  # 2 Vendor_Rating
    success = []  # 3 Vendor_Successful_Transactions
    name = []  # 4 *Product_Name y
    CVE = []  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []  # 6 Product_MS_Classification (Microsoft Security)
    category = []  # 7 Product_Category y
    describe = []  # 8 Product_Description
    views = []  # 9 Product_Number_Of_Views
    reviews = []  # 10 Product_Number_Of_Reviews
    rating_item = []  # 11 Product_Rating
    addDate = []  # 12 Product_AddDate
    BTC = []  # 13 Product_BTC_SellingPrice
    USD = []  # 14 Product_USD_SellingPrice y
    EURO = []  # 15 Product_EURO_SellingPrice
    sold = []  # 16 Product_QuantitySold
    qLeft = []  # 17 Product_QuantityLeft
    shipFrom = []  # 18 Product_ShippedFrom
    shipTo = []  # 19 Product_ShippedTo
    href = []  # 20 Product_Links

    listing = soup.findAll('div', {"class": "item"})

    # Populating the Number of Products
    nm = len(listing)

    # Finding Category
    # cat = soup.find("div", {'class': "heading"}).text
    # cat = cat.replace(",", "")
    # cat = cat.strip()
    for card in listing:
        # category.append(cat)

        # Adding the url to the list of urls
        link = card.find_all('a')
        link = link[1].get('href')
        href.append(link)

        # Finding Product Name
        product = card.next_sibling.find('div', {'class': "title"})
        product = product.text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.strip()
        name.append(product)

        # Finding Vendor
        vendor_name = card.text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding USD
        usd = card.next_sibling.find('div', {"class": "buttons"}).find('div', {'class': "price"}).text
        usd = usd.replace("USD", "")
        usd = usd.strip()
        USD.append(usd)

        tb = card.next_sibling.find("div", {"class": "stats"})
        tb = tb.find_all('td')

        # Finding Reviews
        num = tb[-1].text
        num = num.strip()
        reviews.append(num)

        # Finding Views
        view = tb[-3].text.strip()
        views.append(view)

        # Finding Num of Sales
        sale = tb[-2].text.strip()
        sold.append(sale)

        # Finding Item Rating
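        # The site encodes the rating as a CSS width on the 'stars2' element,
        # e.g. style="width:80%" maps to a 4.00 (out of 5) rating.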
        if num == '0':
            item_rating = '-1'
        else:
            item_rating = card.next_sibling.find('div', {'class': 'stats'}).find('div', {'class': "stars2"})
            item_rating = item_rating.get('style')
            item_rating = item_rating.replace("width:", "")
            item_rating = item_rating.replace("%", "")
            item_rating = (float(item_rating) * 5.0) / 100.0
            item_rating = "{:.{}f}".format(item_rating, 2)
        rating_item.append(item_rating)

        # Finding shipping info
        shipping = card.next_sibling.find('div', {'class': "shipping"}).text.split('>')
        # Ship from
        origin = shipping[0].strip()
        shipFrom.append(origin)
        # Ship to
        destination = shipping[1].strip()
        shipTo.append(destination)

        # Finding description (site only shows partial description on listing pages)
        # description = card.next_sibling.find('div', {'class': "description"}).text
        # description = description.replace("\n", " ")
        # description = description.replace("\r", " ")
        # description = description.replace("-", " ")
        # description = description.strip()
        # describe.append(description)
        # Searching for CVE and MS categories
        cve = card.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = card.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)
    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
def hiddenmarket_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('div', {"class": "item"})

    for div in listing:
        link = div.findAll('a')
        link = link[1]
        link = link['href']
        href.append(link)

    return href
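
# A minimal usage sketch, assuming a listing page previously saved by the
# crawler (the file name is hypothetical; the real pipeline feeds saved pages
# in through prepare_parser):
if __name__ == '__main__':
    with open('listing_sample.html', 'r', encoding='utf-8') as sample:
        print(hiddenmarket_links_parser(BeautifulSoup(sample.read(), 'html.parser')))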
@@ -1 +1 @@
-TorMarket
+HiddenMarket
@ -1,17 +1,15 @@ | |||||
[TOR] | [TOR] | ||||
firefox_binary_path = C:\Users\dabadcuber5\Desktop\Tor Browser\Browser\firefox.exe | |||||
firefox_profile_path = C:\Users\dabadcuber5\Desktop\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default | |||||
geckodriver_path = C:\Users\dabadcuber5\dw_pipeline_test\selenium\geckodriver.exe | |||||
firefox_binary_path = C:\Users\John Wick\Desktop\Tor Browser\Browser\firefox.exe | |||||
firefox_profile_path = C:\Users\John Wick\Desktop\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default | |||||
geckodriver_path = C:\Users\John Wick\PycharmProjects\dw_pipeline_test\selenium\geckodriver.exe | |||||
[Project] | [Project] | ||||
project_directory = C:\Users\dabadcuber5\dw_pipeline_test | |||||
shared_folder = \\Mac\\Shared | |||||
project_directory = C:\Users\John Wick\PycharmProjects\dw_pipeline_test | |||||
shared_folder = Z:\VBoxSvr\VM_Files_ (shared) | |||||
[PostgreSQL] | [PostgreSQL] | ||||
ip = localhost | ip = localhost | ||||
username = postgres | username = postgres | ||||
password = Ilovelucky1! | |||||
password = postgres | |||||
database = darkweb_markets_forums | database = darkweb_markets_forums |