__author__ = 'Helium'

'''
CypherMarketplace Marketplace Crawler (Selenium)

Crawler is complete.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.CypherMarketplace.parser import cyphermarketplace_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://6c5qa2ke2esh6ake6u6yoxjungz2czbbl7hqxl75v5k37frtzhxuk7ad.onion/'


# Opens Tor Browser, crawls the website, parses the pages, then closes Tor
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling():
    opentor()
    # mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(mktName, baseURL, False)


# Opens Tor Browser
# Prompts for ENTER input to continue
def opentor():
    from MarketPlaces.Initialization.markets_mining import config

    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Returns the name of the website
# return: name of site in string type
def getMKTName():
    name = 'CypherMarketplace'
    return name


# Returns the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://6c5qa2ke2esh6ake6u6yoxjungz2czbbl7hqxl75v5k37frtzhxuk7ad.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # privacy hardening: disable history and wipe session data on shutdown
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # 2 = block all images, which speeds up page loads over Tor
    ff_prof.set_preference("permissions.default.image", 2)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    # route all traffic through the Tor SOCKS5 proxy (Tor Browser listens on 9150)
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver


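# Note: FirefoxBinary and FirefoxProfile are deprecated in Selenium 4. The factory
# below is a minimal sketch of the equivalent Options-based setup, reusing the same
# config keys and Tor proxy settings as createFFDriver; it is an untested
# alternative, not the code path this module actually uses.
def createFFDriverWithOptions():
    from MarketPlaces.Initialization.markets_mining import config
    from selenium.webdriver.firefox.options import Options

    options = Options()
    options.binary_location = config.get('TOR', 'firefox_binary_path')
    # same Tor SOCKS5 proxy settings as createFFDriver above
    options.set_preference('network.proxy.type', 1)
    options.set_preference('network.proxy.socks', '127.0.0.1')
    options.set_preference('network.proxy.socks_port', 9150)
    options.set_preference('network.proxy.socks_remote_dns', True)
    options.set_preference('permissions.default.image', 2)  # block images
    options.set_preference('javascript.enabled', False)

    service = Service(config.get('TOR', 'geckodriver_path'))
    return webdriver.Firefox(service=service, options=options)

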
# The driver 'gets' the url; if the site can't be accessed, returns the string 'down'
# return: the selenium driver or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Manual captcha solver: waits for a specific element so that the whole page loads,
# fills in the login form, then waits for the captcha to be solved manually in the browser
# @param: current selenium web driver
def login(driver):
    # wait for page to show up (this XPath may need to change based on the seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div[1]/div/div[1]/div[1]/ul")))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/form/table/tbody/tr[2]/td[2]/input')
    # Username here
    usernameBox.send_keys('beachyoga278')  # sends string to the username box
    passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/form/table/tbody/tr[3]/td[2]/input')
    # Password here
    passwordBox.send_keys('sunfish278')  # sends string to the password box

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for listing page to show up (this XPath may need to change based on the seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div/div[1]/div/div/div[1]/div[2]/ul/li[8]/a")))


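# Note: login() above hardcodes the credentials. A hedged alternative is to read
# them from the same config used elsewhere in this module; the section and key
# names below are assumptions for illustration, not keys that exist in the config.
#
#   from MarketPlaces.Initialization.markets_mining import config
#   usernameBox.send_keys(config.get('CypherMarketplace', 'username'))
#   passwordBox.send_keys(config.get('CypherMarketplace', 'password'))

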
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    # build the path with os.path.join so separators are correct on every OS
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath


# Creates the file name from the passed URL; falls back to a global counter when
# nothing alphanumeric is left after cleaning
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


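# For example, getNameFromURL('http://example.onion/product/abc-123') returns
# 'httpexampleonionproductabc123' (the URL here is illustrative); a URL with no
# alphanumeric characters falls back to the current counter value instead.

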
# Returns the list of urls of interest; the crawler runs through this list
# In this example the links are categories of products, such as
# Guides and Tutorials, Digital Products, and Software and Malware
def getInterestedLinks():
    links = []

    # software
    links.append('http://6c5qa2ke2esh6ake6u6yoxjungz2czbbl7hqxl75v5k37frtzhxuk7ad.onion/category/040ca140-3cfc-11ea-9364-87edd8c0a63f')
    # # guides
    # links.append('http://6c5qa2ke2esh6ake6u6yoxjungz2czbbl7hqxl75v5k37frtzhxuk7ad.onion/category/35a35d10-3cfb-11ea-9b14-65b8930c1372')

    return links


# Gets links of interest to crawl through; iterates through the list, where each link is visited and crawled
# Both listing and description pages are saved here
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the CypherMarketplace market")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    count = 0
    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                    # comment out this break to crawl every product on the page
                    break

                # comment out this block to crawl more than one listing page
                if count == 1:
                    count = 0
                    break

                try:
                    temp = driver.find_element(by=By.XPATH, value=
                        '/html/body/div[2]/div/div/div[2]/div/nav/ul')
                    # 'page-link' is a CSS class, not a tag name, so locate it By.CLASS_NAME
                    link = temp.find_element(by=By.CLASS_NAME, value='page-link').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling CypherMarketplace market done successfully. Press ENTER to continue\n")


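# Note on pagination in crawlForum: find_element(By.CLASS_NAME, 'page-link') returns
# only the first matching element, which on a typical Bootstrap pagination bar is the
# "previous" arrow rather than the next page. A more defensive variant (an untested
# sketch, assuming Bootstrap-style markup on the live site) would be:
#
#   pages = temp.find_elements(by=By.CLASS_NAME, value='page-link')
#   link = pages[-1].get_attribute('href') if pages else ""

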
# Returns 'True' if the link is a description link
# @param: url of any page crawled
# return: true if it is a description page, false if not
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
# @param: url of any page crawled
# return: true if it is a listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False


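# For example (illustrative URLs, not real pages):
#   isDescriptionLink('http://...onion/product/abc123')    -> True
#   isListingLink('http://...onion/category/040ca140-...') -> True

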
# Calls the parser to extract the description links from a listing page
# @param: html source of a page from the interested link list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return cyphermarketplace_links_parser(soup)


# Drop links that "signout"
# def isSignOut(url):
#     #absURL = urlparse.urljoin(url.base_url, url.url)
#     if 'signout' in url.lower() or 'logout' in url.lower():
#         return True
#
#     return False


def crawler():
    startCrawling()
    # print("Crawling and Parsing CypherMarketplace .... DONE!")
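

# Optional direct entry point: a minimal guard (added for convenience) so the module
# can also be run as a script, assuming the MarketPlaces package and its config are
# importable from the working directory.
if __name__ == '__main__':
    crawler()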