__author__ = 'DarkWeb'

'''
Helium Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Helium.parser import helium_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/'


# Opens Tor Browser and crawls the website
def startCrawling():
    # opentor()
    # forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        login(driver)
        crawlForum(driver)
        closetor(driver)

    # new_parse(forumName, False)


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    path = open('../../path.txt').readline().strip()
    pro = subprocess.Popen(path)
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Logs in with premade account credentials; the login captcha is solved manually
def login(driver):
    # wait for the login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div[1]/div/div/div[2]/form/div[5]/div/button")))

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('holyre')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('PlatinumBorn2')

    '''
    # wait for the captcha page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="captcha_img"]')))

    # save the captcha to a local file
    driver.find_element(by=By.XPATH, value='//*[@id="captcha_img"]').screenshot(r'..\Helium\captcha.png')

    # show the image in the default image viewer
    im = Image.open(r'..\Helium\captcha.png')
    im.show()

    # locate the captcha input box
    inputBox = driver.find_element(by=By.XPATH, value='//*[@id="captcha"]')

    # ask the user to type the captcha solution in the terminal
    userIn = input("Enter solution: ")

    # send the user's solution into the input box
    inputBox.send_keys(userIn)

    # click the verify (submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div[2]/div/div[1]/div/div/div[2]/form/div[5]/div/button").click()
    '''
    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change for a different seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[2]/div/p')))


# Returns the name of the website
def getForumName():
    name = 'Helium'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/login'
    return url


# Closes Tor Browser
def closetor(driver):
    global pid
    # os.system("taskkill /pid " + str(pro.pid))
    os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
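
# --- Hedged example (not part of the original crawler) ----------------------
# closetor() above relies on the Windows-only `taskkill` command. A minimal
# cross-platform sketch using only the standard library, assuming `pid` holds
# the Tor process id saved by opentor(); the name closetorPortable is
# hypothetical.
def closetorPortable(driver):
    import signal
    global pid
    print('Closing Tor...')
    driver.quit()                     # quit() also tears down the geckodriver session
    try:
        os.kill(pid, signal.SIGTERM)  # terminate the Tor process by its pid
    except OSError:
        pass                          # process already gone
    time.sleep(3)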

# Creates a Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    with open('../../path.txt', 'r') as file:
        lines = file.readlines()

    ff_binary = FirefoxBinary(lines[0].strip())

    ff_prof = FirefoxProfile(lines[1].strip())
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # ff_prof.set_preference("permissions.default.image", 2)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(lines[2].strip())

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver


# Opens the seed URL; returns the driver on success, 'down' otherwise
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    today = date.today()
    dateStr = "%02d%02d%04d" % (today.month, today.day, today.year)
    if isDescriptionLink(url):
        fullPath = r'..\Helium\HTML_Pages' + '\\' + dateStr + '\\' + 'Description' + '\\' + fileName + '.html'
    else:
        fullPath = r'..\Helium\HTML_Pages' + '\\' + dateStr + '\\' + 'Listing' + '\\' + fileName + '.html'
    return fullPath


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
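
# --- Hedged example (not part of the original crawler) ----------------------
# getFullPathName() above hard-codes Windows path separators. A minimal
# portable sketch that keeps the same MMDDYYYY/Description|Listing layout but
# builds the path with os.path.join; the name getFullPathNamePortable is
# hypothetical.
def getFullPathNamePortable(url):
    fileName = getNameFromURL(url)
    dateStr = date.today().strftime("%m%d%Y")  # e.g. '01152024'
    subDir = 'Description' if isDescriptionLink(url) else 'Listing'
    return os.path.join('..', 'Helium', 'HTML_Pages', dateStr, subDir, fileName + '.html')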

# Returns the list of board (listing) pages the crawler should visit
def getInterestedLinks():
    links = []

    # General Discussion
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/6')
    # Anonymity and Security
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/8')
    # Programming
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/9')
    # Carding Discussions
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/10')
    # Hacked Database (free)
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/11')
    # Hacking tools, exploits and POC
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/17')
    # Hacked Database
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/12')
    # Hacking and other Services
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/13')
    # Selling/Buying Malware, Exploits etc
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/22')
    # General Tutorials
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/18')
    # Hacking Tutorials
    links.append('http://fahue6hb7odzns36vfoi2dqfvqvjq4btt7vo52a67jivmyz6a6h3vzqd.onion/board/19')

    return links


def crawlForum(driver):
    print("Crawling the Helium forum")

    linksToCrawl = getInterestedLinks()
    # visited = set(linksToCrawl)
    # initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                # collect the topic links on the current listing page
                topics = topicPages(html)
                for item in topics:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                # comment out
                break

                # comment out
                if count == 1:
                    count = 0
                    break

                try:
                    # find the pagination bar and follow the 'next page' link
                    bar = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div[3]/ul')
                    li = bar.find_elements(By.TAG_NAME, 'li')[-1]
                    link = li.find_element(By.TAG_NAME, 'a').get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling Helium forum done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a Topic link
def isDescriptionLink(url):
    if 'topic' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
def isListingLink(url):
    if 'board' in url:
        return True
    return False


# calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return helium_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Helium .... DONE!")
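
# --- Hedged example (not part of the original crawler) ----------------------
# A minimal entry point, assuming this module may also be run directly; the
# repository likely invokes crawler() from its initialization code instead.
if __name__ == '__main__':
    crawler()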