__author__ = 'Helium'

'''
Altenens Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, re, time
from datetime import date
import configparser
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Altenens.parser import altenens_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'https://altenens.is/'


# Opens Tor Browser, logs in, and crawls the website
def startCrawling():
    opentor()
    # forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(forumName, False)


# Opens Tor Browser
def opentor():
    from Forums.Initialization.forums_mining import config

    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Logs in with premade account credentials; the login CAPTCHA is solved manually
def login(driver):
    # click the login button
    login = driver.find_element(by=By.XPATH, value='//*[@id="top"]/div[1]/div/div/div/div[1]/a[1]')
    login.click()

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="_xfUid-1-1688066635"]')
    usernameBox.send_keys('mylittlepony45')  # sends the username string to the username box
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="_xfUid-2-1688066635"]')
    passwordBox.send_keys('johnnyTest@18')  # sends the password string to the password box

    input("Press ENTER when CAPTCHA is completed\n")

    # wait up to 50 seconds for the first thread entry of the listing page to become visible
    # (this XPath may need to change for a different seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[4]/div/div/div[1]/div/div[1]/div[2]/ol/li[1]')))


# Returns the name of the website
def getForumName():
    name = 'Altenens'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'https://altenens.is/'
    return url


# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close tab
    time.sleep(3)
    return


# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
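
# Optional sanity check (a sketch, not part of the original crawl flow): loads the Tor
# Project's check page through the freshly configured driver and looks for its success
# message, to confirm traffic really goes through the SOCKS proxy set in createFFDriver().
# The helper name and the string being matched are assumptions for illustration.
def isRoutedThroughTor(driver):
    try:
        driver.get('https://check.torproject.org/')
        return 'Congratulations' in driver.page_source
    except Exception:
        return False
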
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)  # open the url in the browser
        return driver
    except:
        driver.close()  # close tab
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


# Seed listing pages (sub-forums) to crawl
def getInterestedLinks():
    links = []

    # hacking tools
    links.append('https://altenens.is/forums/hacking-tools.469165/')
    # # hash cracking
    # links.append('https://altenens.is/forums/hash-cracking.469167/')
    # # phishing and spamming
    # links.append('https://altenens.is/forums/phishing-and-spamming.469223/')
    # # pentesting
    # links.append('https://altenens.is/forums/pentesting.469169/')
    # # cracking tools
    # links.append('https://altenens.is/forums/cracking-tools.469204/')
    # # cracking tutorials / other methods
    # links.append('https://altenens.is/forums/cracking-tutorials-other-methods.469205/')

    return links


def crawlForum(driver):
    print("Crawling the Altenens forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)  # open the listing page
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True

            # loop through the listing pages of this sub-forum
            while has_next_page:
                topics = topicPages(html)  # topic (thread) links on the current listing page
                for item in topics:
                    # flag to check if there is a next page for the topic
                    has_next_topic_page = True
                    counter = 1

                    # follow the topic across all of its pages
                    while has_next_topic_page:
                        # try to access the next page of the topic
                        itemURL = urlparse.urljoin(baseURL, str(item))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver.page_source, item)
                        # if the topic has a next page, follow it and save it as well
                        try:
                            item = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[1]/div[1]/div[1]/nav/div[1]/a').get_attribute('href')

                            if item == "":
                                raise NoSuchElementException
                            else:
                                counter += 1

                        except NoSuchElementException:
                            has_next_topic_page = False

                    # go back to the listing page (use a separate variable so the
                    # outer link index 'i' is not clobbered)
                    for j in range(counter):
                        driver.back()

                    # comment out to crawl every topic on the listing page
                    break

                # comment out to crawl more than two listing pages per sub-forum
                if count == 1:
                    count = 0
                    break

                try:  # next listing page (this XPath may change depending on the web page)
                    link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/div[1]/div[1]/nav/div[1]/a').get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling Altenens forum done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a topic link; may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False


# calls the parser to extract the topic (thread) links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return altenens_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Altenens .... DONE!")
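
# Entry-point sketch (an assumption, not in the original module): in the project this
# crawler is normally driven through crawler() by the Forums.Initialization pipeline,
# but the guard below also allows a standalone manual run of the same flow.
if __name__ == '__main__':
    crawler()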