__author__ = 'Helium'

'''
Kerberos Market Crawler (Selenium)

Note: this market is able to detect crawlers, so login and CAPTCHAs
are completed manually.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image  # used only by the commented-out CAPTCHA flow in login()

import os
import time
import urllib.parse as urlparse
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Kerberos.parser import kerberos_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion'


# Opens Tor Browser, crawls the website
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    # new_parse(mktName, baseURL, True)

def captcha(driver):
    # CAPTCHAs are solved manually; block until the user confirms
    input('Complete CAPTCHAs manually, then press ENTER when completed')

def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Login using premade account credentials and do login captcha manually
def login(driver):
    captcha(driver)
    # wait for the login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[1]/div[2]")))

    input("There may be an Enter button you need to press.\nPress it now, then press ENTER on the keyboard")

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[1]')
    # Username here
    usernameBox.send_keys('blabri')
    passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[2]')
    # Password here
    passwordBox.send_keys('fishowal')

    input('Complete the CAPTCHA, press Login, then press ENTER on the keyboard')


    # --- previous automated CAPTCHA flow, kept for reference ---

    # wait for the CAPTCHA page to show up
    # WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
    #     (By.XPATH, "/html/body/div/img[24]")))
    # time.sleep(10)

    # save captcha to local
    # driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/div[6]').screenshot(
    #     r'..\Kerberos\captcha.png')
    #
    # # This method will show image in any image viewer
    # im = Image.open(r'..\Kerberos\captcha.png')
    #
    # im.show()
    #
    # # wait until input space show up
    # inputBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[3]')

    # # ask user input captcha solution in terminal
    # userIn = input("Enter solution: ")
    #
    # # send user solution into the input space
    # inputBox.send_keys(userIn)
    #
    # # click the verify(submit) button
    # driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # driver.find_element(by=By.XPATH, value="/html/body/div[1]/div[2]/div/form/div[10]/button").click()
    #
    # # wait for listing page show up (This Xpath may need to change based on different seed url)
    # WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
    #     (By.XPATH, '//*[@id="breadcrumb"]')))


# Returns the name of the website
def getMKTName():
    name = 'Kerberos'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion'
    return url


# Closes Tor Browser (currently unused; closeDriver above is the one called)
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.quit()
    time.sleep(3)
    return


# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # privacy: no history, clear site data and credentials on shutdown
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)  # session-only cookies
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)  # restrict image loading
    # downloads: save text/plain to a custom folder without prompting
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    # route all traffic through the local Tor SOCKS5 proxy (Tor Browser default port 9150)
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)  # reduce fingerprinting surface
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver
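
# Selenium 4.10+ removed the firefox_binary/firefox_profile keyword arguments
# used in createFFDriver() above. The function below is a minimal sketch of an
# Options-based equivalent (an assumption for newer Selenium versions, not part
# of the original crawler; the set_preference() hardening above would be applied
# to the profile the same way):
def createFFDriverWithOptions():
    from MarketPlaces.Initialization.markets_mining import config
    from selenium.webdriver.firefox.options import Options

    options = Options()
    options.binary_location = config.get('TOR', 'firefox_binary_path')
    options.profile = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(service=service, options=options)
    driver.maximize_window()
    return driver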


def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver

    except Exception:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces", getMKTName(), "HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
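
# The resulting layout is, e.g.:
#   <shared_folder>/MarketPlaces/Kerberos/HTML_Pages/<CURRENT_DATE>/Listing/<fileName>.html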


# Creates the file name from passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
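    # e.g. 'http://site.onion/item/123' -> 'httpsiteonionitem123' (illustrative)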
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


def getInterestedLinks():
    links = []

    # Services - Hacking
    links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/99/block/price-none/ww/ww/1/')
    # # Tutorials - Hacking
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/122/block/price-none/ww/ww/1/')
    # # Tutorials - Guides
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/124/block/price-none/ww/ww/1/')
    # # Tutorials - Other
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/126/block/price-none/ww/ww/1/')
    # # Software and Malware - Botnets
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/129/block/price-none/ww/ww/1/')
    # # Software and Malware - Malware
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/130/block/price-none/ww/ww/1/')
    # # Software and Malware - Trojans
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/131/block/price-none/ww/ww/1/')
    # # Software and Malware - Exploits / Kits
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/133/block/price-none/ww/ww/1/')
    # # Software and Malware - Other
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/136/block/price-none/ww/ww/1/')

    return links


def crawlForum(driver):
    print("Crawling the Kerberos market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)

        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except Exception:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)
                productList = productPages(html)

                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except Exception:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()
                    time.sleep(5)

                    # uncomment to crawl only the first product per page (testing)
                    # break

                # testing safeguard: stop after the second listing page
                if count == 1:
                    break

                try:
                    # pagination bar, e.g. /html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]/a[15]
                    nav = driver.find_element(by=By.XPATH, value=
                        '/html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]')
                    a = nav.find_element(by=By.LINK_TEXT, value="Next")
                    link = a.get_attribute('href')
                    if not link:
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    input("Crawling Kerberos market done sucessfully. Press ENTER to continue\n")


# Returns 'True' if the link is a description (product) link
def isDescriptionLink(url):
    if 'item' in url:
        return True
    return False


# Returns True if the link is a listingPage link
def isListingLink(url):
    if 'categories' in url:
        return True
    return False


# calls the parser to extract product links from a listing page
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return kerberos_links_parser(soup)
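
# kerberos_links_parser(soup) is expected to return the product-page hrefs found
# on a listing page. A hypothetical minimal version (an assumption, not the
# actual code in MarketPlaces.Kerberos.parser):
#
#     def kerberos_links_parser(soup):
#         return [a['href'] for a in soup.find_all('a', href=True)
#                 if isDescriptionLink(a['href'])]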


def crawler():
    startCrawling()
    # print("Crawling and Parsing Kerberos .... DONE!")