
Merge remote-tracking branch 'origin/main'

Branch: main
Author: westernmeadow, 11 months ago
Commit: 5c25855ce6
1 changed file with 164 additions and 113 deletions:

  MarketPlaces/Kerberos/crawler_selenium.py  (+164, -113)


@@ -1,7 +1,9 @@
__author__ = 'DarkWeb'
__author__ = 'Helium'
'''
Kerberos Market Crawler (Selenium)
able to catch crawlers
'''
from selenium import webdriver
@@ -29,48 +31,41 @@ baseURL = 'http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion
# Opens Tor Browser, crawls the website
def startCrawling():
opentor()
# marketName = getMarketName()
mktName = getMKTName()
driver = getAccess()
if driver != 'down':
captcha(driver)
login(driver)
crawlForum(driver)
# new_parse(marketName, False)
closetor(driver)
# Opens Tor Browser
def opentor():
global pid
print("Connecting Tor...")
path = open('../../path.txt').readline().strip()
pro = subprocess.Popen(path)
pid = pro.pid
time.sleep(7.5)
input('Tor Connected. Press ENTER to continue\n')
return
try:
login(driver)
crawlForum(driver)
except Exception as e:
print(driver.current_url, e)
closeDriver(driver)
# new_parse(mktName, baseURL, True)
def captcha(driver):
# wait for captcha page
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div/div/img")))
# too hard to code, requires manual completion
# do captchas manually and then wait
input('Complete CAPTCHA\'s manually then press enter when completed')
# wait for login page
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div[1]/div[2]/div/form/div[10]/button")))
def closeDriver(driver):
# global pid
# os.system("taskkill /pid " + str(pro.pid))
# os.system("taskkill /t /f /im tor.exe")
print('Closing Tor...')
driver.close()
time.sleep(3)
return
# Login using premade account credentials and do login captcha manually
def login(driver):
captcha(driver)
#wait for login page
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div[1]/div[2]/div/form/div[10]/button")))
(By.XPATH, "/html/body/div[1]/div[2]")))
input("There may be an enter button you need to press.\npress it now then press enter on the keyboard")
#entering username and password into input boxes
usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[1]')
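
Note: pieced together from the removed and added lines above, the old opentor()/path.txt startup is gone and the new startCrawling() wraps the crawl in a try/except before handing the driver to closeDriver(). A minimal reassembly of the new flow (indentation is inferred, since the diff view flattens it; every name is the module's own):

def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    # new_parse(mktName, baseURL, True)
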
@@ -80,40 +75,43 @@ def login(driver):
#Password here
passwordBox.send_keys('fishowal')
input('complete CAPTCHA, press login, and then press enter on keyboard')
# wait for captcha page show up
# WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
# (By.XPATH, "/html/body/div/img[24]")))
time.sleep(10)
# time.sleep(10)
# save captcha to local
driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/div[6]').screenshot(
r'..\Kerberos\captcha.png')
# This method will show image in any image viewer
im = Image.open(r'..\Kerberos\captcha.png')
im.show()
# wait until input space show up
inputBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[3]')
# ask user input captcha solution in terminal
userIn = input("Enter solution: ")
# send user solution into the input space
inputBox.send_keys(userIn)
# click the verify(submit) button
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
driver.find_element(by=By.XPATH, value="/html/body/div[1]/div[2]/div/form/div[10]/button").click()
# wait for listing page show up (This Xpath may need to change based on different seed url)
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, '//*[@id="breadcrumb"]')))
# driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/div[6]').screenshot(
# r'..\Kerberos\captcha.png')
#
# # This method will show image in any image viewer
# im = Image.open(r'..\Kerberos\captcha.png')
#
# im.show()
#
# # wait until input space show up
# inputBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[3]')
# # ask user input captcha solution in terminal
# userIn = input("Enter solution: ")
#
# # send user solution into the input space
# inputBox.send_keys(userIn)
#
# # click the verify(submit) button
# driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# driver.find_element(by=By.XPATH, value="/html/body/div[1]/div[2]/div/form/div[10]/button").click()
#
# # wait for listing page show up (This Xpath may need to change based on different seed url)
# WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
# (By.XPATH, '//*[@id="breadcrumb"]')))
# Returns the name of the website
def getMarketName():
def getMKTName():
name = 'Kerberos'
return name
@@ -121,7 +119,6 @@ def getMarketName():
# Return the link of the website
def getFixedURL():
url = 'http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion'
return url
@@ -139,12 +136,11 @@ def closetor(driver):
# Creates FireFox 'driver' and configure its 'Profile'
# to use Tor proxy and socket
def createFFDriver():
file = open('../../path.txt', 'r')
lines = file.readlines()
from MarketPlaces.Initialization.markets_mining import config
ff_binary = FirefoxBinary(lines[0].strip())
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
ff_prof = FirefoxProfile(lines[1].strip())
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
ff_prof.set_preference("places.history.enabled", False)
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
@@ -152,7 +148,7 @@ def createFFDriver():
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
ff_prof.set_preference("signon.rememberSignons", False)
ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
ff_prof.set_preference("network.dns.disablePrefetch", True)
# ff_prof.set_preference("network.dns.disablePrefetch", True)
# ff_prof.set_preference("network.http.sendRefererHeader", 0)
ff_prof.set_preference("permissions.default.image", 3)
ff_prof.set_preference("browser.download.folderList", 2)
@@ -166,30 +162,30 @@ def createFFDriver():
ff_prof.set_preference("javascript.enabled", False)
ff_prof.update_preferences()
service = Service(executable_path=lines[2].strip())
service = Service(config.get('TOR', 'geckodriver_path'))
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
driver.maximize_window()
return driver
def getAccess():
url = getFixedURL()
driver = createFFDriver()
try:
driver.get(url)
return driver
except:
driver.close()
return 'down'
# Saves the crawled html page
def savePage(page, url):
cleanPage = cleanHTML(page)
def savePage(driver, page, url):
cleanPage = cleanHTML(driver, page)
filePath = getFullPathName(url)
os.makedirs(os.path.dirname(filePath), exist_ok=True)
open(filePath, 'wb').write(cleanPage.encode('utf-8'))
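
The hard-coded reads from ../../path.txt (Firefox binary, profile, geckodriver) are replaced by lookups on a shared config object imported from MarketPlaces.Initialization.markets_mining. That module is not part of this diff; assuming it exposes a standard configparser instance, the lookups amount to something like the following sketch (the file name and loading code are assumptions, only the section/keys appear in the diff):

import configparser

# hypothetical loader; the real one lives in MarketPlaces/Initialization/markets_mining
config = configparser.ConfigParser()
config.read('config.ini')  # assumed location

ff_binary_path  = config.get('TOR', 'firefox_binary_path')
ff_profile_path = config.get('TOR', 'firefox_profile_path')
gecko_path      = config.get('TOR', 'geckodriver_path')
# the next hunk also reads config.get('Project', 'shared_folder')
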
@@ -198,15 +194,14 @@ def savePage(page, url):
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
fileName = getNameFromURL(url)
if isDescriptionLink(url):
fullPath = r'..\Kerberos\HTML_Pages\\' + str(
"%02d" % date.today().month) + str("%02d" % date.today().day) + str(
"%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html'
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
else:
fullPath = r'..\Kerberos\HTML_Pages\\' + str(
"%02d" % date.today().month) + str("%02d" % date.today().day) + str(
"%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html'
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
return fullPath
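
The save location now comes from the shared_folder config value plus CURRENT_DATE instead of a hand-built MMDDYYYY string. The new lines still embed raw '\\' separators inside os.path.join, which only behaves as intended on Windows; a portable equivalent (a sketch, not what the commit does) would let os.path.join supply every separator:

if isDescriptionLink(url):
    fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
else:
    fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
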
@@ -225,22 +220,22 @@ def getInterestedLinks():
# Services - Hacking
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/99/block/price-none/ww/ww/1/')
# Tutorials - Hacking
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/122/block/price-none/ww/ww/1/')
# Tutorials - Guides
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/124/block/price-none/ww/ww/1/')
# Tutorials - Other
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/126/block/price-none/ww/ww/1/')
# Software and Malware - Botnets
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/129/block/price-none/ww/ww/1/')
# Software and Malware - Malware
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/130/block/price-none/ww/ww/1/')
# Software and Malware - Trojans
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/131/block/price-none/ww/ww/1/')
# Software and Malware - Exploits / Kits
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/133/block/price-none/ww/ww/1/')
# Software and Malware - Other
links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/136/block/price-none/ww/ww/1/')
# # Tutorials - Hacking
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/122/block/price-none/ww/ww/1/')
# # Tutorials - Guides
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/124/block/price-none/ww/ww/1/')
# # Tutorials - Other
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/126/block/price-none/ww/ww/1/')
# # Software and Malware - Botnets
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/129/block/price-none/ww/ww/1/')
# # Software and Malware - Malware
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/130/block/price-none/ww/ww/1/')
# # Software and Malware - Trojans
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/131/block/price-none/ww/ww/1/')
# # Software and Malware - Exploits / Kits
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/133/block/price-none/ww/ww/1/')
# # Software and Malware - Other
# links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/136/block/price-none/ww/ww/1/')
return links
@@ -248,55 +243,114 @@ def getInterestedLinks():
def crawlForum(driver):
print("Crawling the Kerberos market")
# linksToCrawl = getInterestedLinks()
# visited = set(linksToCrawl)
# initialTime = time.time()
#
# i = 0
# count = 0
# while i < len(linksToCrawl):
# link = linksToCrawl[i]
# print('Crawling :', link)
#
# try:
# try:
# driver.get(link)
# except:
# driver.refresh()
# html = driver.page_source
# savePage(driver, html, link)
#
# has_next_page = True
# while has_next_page:
# list = productPages(html)
# for item in list:
# itemURL = urlparse.urljoin(baseURL, str(item))
# try:
# driver.get(itemURL)
# except:
# driver.refresh()
# savePage(driver,driver.page_source, item)
# driver.back()
# # break
#
# if count == 1:
# count = 0
# break
#
# try:
# # /html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]/a[15]
# # /html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]/a[16]
# nav = driver.find_element(by=By.XPATH, value=
# '/html/body/div[3]/div[4]/div[4]/div/div[1]/div[28]')
# a = nav.find_element(by=By.LINK_TEXT, value="Next")
# link = a.get_attribute('href')
#
# if link == "":
# raise NoSuchElementException
# try:
# driver.get(link)
# except:
# driver.refresh()
# html = driver.page_source
# savePage(driver, html, link)
# count += 1
#
# except NoSuchElementException:
# has_next_page = False
#
# except Exception as e:
# print(link, e)
# i += 1
linksToCrawl = getInterestedLinks()
visited = set(linksToCrawl)
initialTime = time.time()
i = 0
count = 0
while i < len(linksToCrawl):
link = linksToCrawl[i]
print('Crawling :', link)
try:
try:
driver.get(link)
except:
driver.refresh()
html = driver.page_source
savePage(html, link)
has_next_page = True
count = 0
while has_next_page:
try:
driver.get(link)
except:
driver.refresh()
html = driver.page_source
savePage(driver, html, link)
list = productPages(html)
for item in list:
itemURL = urlparse.urljoin(baseURL, str(item))
try:
driver.get(itemURL)
except:
driver.refresh()
savePage(driver.page_source, item)
savePage(driver, driver.page_source, item)
driver.back()
time.sleep(5)
# comment out
# break
# comment out
if count == 1:
count = 0
break
try:
# /html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]/a[15]
# /html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]/a[15]
# /html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]/a[3]
nav = driver.find_element(by=By.XPATH, value=
'/html/body/div[3]/div[4]/div[4]/div/div[1]/div[28]')
'/html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]')
a = nav.find_element(by=By.LINK_TEXT, value="Next")
link = a.get_attribute('href')
if link == "":
raise NoSuchElementException
try:
driver.get(link)
except:
driver.refresh()
html = driver.page_source
savePage(html, link)
count += 1
except NoSuchElementException:
@@ -306,9 +360,6 @@ def crawlForum(driver):
print(link, e)
i += 1
# finalTime = time.time()
# print finalTime - initialTime
input("Crawling Kerberos market done sucessfully. Press ENTER to continue\n") input("Crawling Kerberos market done sucessfully. Press ENTER to continue\n")

