__author__ = 'DarkWeb'

'''
ViceCity Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.ViceCity.parser import vicecity_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/'


# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; crawler() at the end of this file calls it.
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
# return: name of site in string type
def getMKTName():
    name = 'ViceCity'
    return name


# Returns the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates the Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)  # block images to speed up page loads
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)  # manual proxy configuration
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)  # Tor Browser's default SOCKS port
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)  # resolve .onion addresses through Tor
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver


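# A small sketch (an addition, not part of the original flow) of how one might check that
# the Tor SOCKS proxy is actually listening before launching Firefox; the host and port
# mirror the preferences set above and would need to match your own Tor setup.
def torProxyReachable(host='127.0.0.1', port=9150, timeout=5):
    import socket
    try:
        # attempt a plain TCP connection to the SOCKS port; success means Tor is up
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

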
# The driver 'gets' the url and attempts to access the site; returns 'down' if it can't.
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Manual captcha solver: waits for a specific element so the whole page loads, fills in
# the login form, and pauses so the captchas can be solved by hand in the browser window.
# @param: current selenium web driver
def login(driver):
    # wait for the first captcha page to show up (this XPath may need to change based on a different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div/form/div/div[1]")))
    input("Press Enter once captcha done")
    # clicks the button after the captcha is entered
    # driver.find_element(by=By.XPATH, value='/html/body/div/div/form/button').click()

    # wait for the login page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div/form')))
    # puts username into the box
    userBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    userBox.send_keys('ct1234')

    # waits for the second captcha to be entered by the user
    input("Press Enter once captcha done")
    # clicks on continue
    # driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/input[2]').click()

    # waits for the password field to show
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div/form/div[3]/input')))
    time.sleep(10)  # give the site time to catch up
    # puts password into the box
    passBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/div[2]/input')
    passBox.send_keys('DementedBed123-')
    driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/div[3]/input').click()

    # wait for the PIN input to show
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div/form/span')))
    pinBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/form/input[1]')
    pinBox.send_keys('12345')
    driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/form/input[2]').click()

    # waits for the main listing page before crawling to ensure everything went well
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div/div[2]')))


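# A small sketch (an addition, not original behavior) of reading the credentials from the
# project config instead of hard-coding them above; the 'ViceCity' section and its option
# names are assumptions about the config layout, not something the config is known to have.
def getCredentials():
    from MarketPlaces.Initialization.markets_mining import config
    username = config.get('ViceCity', 'username')  # assumed option
    password = config.get('ViceCity', 'password')  # assumed option
    pin = config.get('ViceCity', 'pin')            # assumed option
    return username, password, pin

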
# Saves the crawled html page, creating the directory path for html pages if it doesn't exist
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath


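# For illustration: assuming shared_folder is '/data' and CURRENT_DATE renders as
# '2023-06-01' (both values are assumptions; they come from the project config and
# markets_mining), a description page would be saved to
#   /data/MarketPlaces/ViceCity/HTML_Pages/2023-06-01/Description/<cleaned-url-name>.html

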
# Creates the file name from the passed URL; gives a distinct counter-based name
# if the cleaned url is empty
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


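# Illustrative example (hypothetical URL): every non-alphanumeric character is dropped,
# so 'http://example.onion/?category=150' becomes 'httpexampleonioncategory150'; a URL
# that cleans down to nothing falls back to the global counter for a distinct name.

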
# Returns the list of urls of interest; this is where you list the different urls the
# crawler should run through. In this example there are a few product categories, such as
# Guides and Tutorials, Digital Products, and Software and Malware.
def getInterestedLinks():
    links = []

    # Digital - Fraud Software, has Hacking and Guides
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=150')
    # # Digital - Guides and Tutorials
    # links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=94')
    # # Carding Services
    # links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=155')
    # # Digital - Other (half junk, half random stuff like: bots, rats, viruses, and guides)
    # links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=153')

    return links


# Iterates through the list of links of interest; each link is fetched and crawled through.
# Both listing and description pages are crawled here, and both types of pages are saved.
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the ViceCity Market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    time.sleep(2.5)  # to let the page catch up
                    savePage(driver, driver.page_source, item)
                    time.sleep(2.5)  # so the site doesn't crash
                    driver.back()

                    # uncomment this break to stop after the first product (testing)
                    # break

                # testing limit: stops after two listing pages; comment out for a full crawl
                if count == 1:
                    break

                try:
                    # follow the 'Next' link in the pagination bar, if there is one
                    temp = driver.find_element(by=By.CLASS_NAME, value='pagination')
                    link = temp.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the ViceCity market done.")


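# A small sketch (an addition, not part of the original flow) of the load/refresh pattern
# used in crawlForum: over Tor, page loads fail often enough that a bounded
# get-with-retries helper could replace the bare try/except pairs above.
def getWithRetries(driver, url, attempts=3, delay=2.5):
    # try the page load a bounded number of times, pausing between attempts
    for _ in range(attempts):
        try:
            driver.get(url)
            return True
        except Exception:
            time.sleep(delay)
    return False

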
# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'listing' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
# @param: url of any url crawled
# return: true if is a Listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False


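# Illustrative (hypothetical) URL shapes for the two predicates above: any url containing
# 'listing' (e.g. '.../listing?id=abc') is treated as a description page, while any url
# containing 'category' (e.g. '.../?category=150', as in getInterestedLinks) is a listing page.

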
# Calls the parser to extract the product links from a listing page's html
# @param: html of a page reached via the interested links list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return vicecity_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing ViceCity .... DONE!")
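

# A minimal entry point sketch (an addition; in the original project this module is
# presumably driven by MarketPlaces.Initialization, so standalone use is an assumption).
if __name__ == '__main__':
    crawler()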