__author__ = 'DarkWeb'

'''
DarkFox Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.DarkFox.parser import darkfox_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/'


# Opens Tor Browser, crawls the website, parses the pages, then closes Tor
#acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling():
    opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            captcha(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, False)


# Opens Tor Browser
#prompts for ENTER input to continue
def opentor():
    global pid
    print("Connecting Tor...")
    with open('../../path.txt') as file:
        path = file.readline().strip()
    pro = subprocess.Popen(path)
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Returns the name of the website
#return: name of site in string type
def getMKTName():
    name = 'DarkFox'
    return name


# Returns credentials needed for the mkt
def getCredentials():
    credentials = 'blank blank blank blank cap 0'
    return credentials


# Return the base link of the website
#return: url of base site in string type
def getFixedURL():
    url = 'http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/'
    return url


# Closes Tor Browser
#@param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
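# path.txt is expected to hold three lines (inferred from how this file reads it):
#   line 1: the Tor Browser / Firefox binary path (also launched by opentor())
#   line 2: the Firefox profile directory
#   line 3: the geckodriver executable path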
def createFFDriver():
    with open('../../path.txt', 'r') as file:
        lines = file.readlines()

    ff_binary = FirefoxBinary(lines[0].strip())

    ff_prof = FirefoxProfile(lines[1].strip())
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # ff_prof.set_preference("permissions.default.image", 2)
    # ff_prof.set_preference("browser.download.folderList", 2)
    # ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    # ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(lines[2].strip())

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
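
# Newer Selenium 4 releases remove the firefox_binary/firefox_profile keyword
# arguments used above. The function below is a minimal sketch of the equivalent
# Options-based construction, assuming the same three-line path.txt layout;
# createFFDriverV4 is a hypothetical name and is not called anywhere in this crawler.
def createFFDriverV4():
    from selenium.webdriver.firefox.options import Options

    with open('../../path.txt', 'r') as file:
        lines = file.readlines()

    options = Options()
    options.binary_location = lines[0].strip()           # Firefox/Tor Browser binary
    options.profile = FirefoxProfile(lines[1].strip())   # pre-configured profile
    # route all traffic, including DNS lookups, through the local Tor SOCKS proxy
    options.set_preference('network.proxy.type', 1)
    options.set_preference('network.proxy.socks_version', 5)
    options.set_preference('network.proxy.socks', '127.0.0.1')
    options.set_preference('network.proxy.socks_port', 9150)
    options.set_preference('network.proxy.socks_remote_dns', True)
    options.set_preference('javascript.enabled', False)

    return webdriver.Firefox(service=Service(lines[2].strip()), options=options)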


#the driver 'gets' the url, attempting to access the site; returns 'down' if it can't
#return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        driver.close()
        return 'down'


# Manual captcha solver: waits for a specific element so that the whole page loads, finds the input box,
# takes a screenshot of the captcha, then allows for manual solving of the captcha in the terminal
#@param: current selenium web driver
def captcha(driver):
    # wait for the captcha page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located((By.XPATH, "/html/body/div/div/form/button[1]")))

    # save the captcha image locally
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/div[1]/div[1]").screenshot(r'..\DarkFox\captcha.png')

    # Image.open can load image files with different extensions
    im = Image.open(r'..\DarkFox\captcha.png')

    # show the image in the default image viewer
    im.show()

    # locate the captcha input box
    inputBox = driver.find_element(by=By.XPATH, value="/html/body/div/div/form/div[1]/div[2]/input")

    # ask the user to type the captcha solution in the terminal
    userIn = input("Enter solution: ")

    # send the user's solution into the input box
    inputBox.send_keys(userIn)

    # click the verify(submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/button[1]").click()

    # wait for the listing page to show up (this XPath may need to change based on a different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/main/div/div/div[2]/div[1]/div[1]/form/div[1]/h1")))


# Saves the crawled html page, creating the directory path for html pages if it doesn't exist
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
    fileName = getNameFromURL(url)
    dateStr = str("%02d" % date.today().month) + str("%02d" % date.today().day) + str("%04d" % date.today().year)
    if isDescriptionLink(url):
        fullPath = r'..\DarkFox\HTML_Pages\\' + dateStr + r'\\' + r'Description\\' + fileName + '.html'
    else:
        fullPath = r'..\DarkFox\HTML_Pages\\' + dateStr + r'\\' + r'Listing\\' + fileName + '.html'
    return fullPath
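
# For example, a description link saved on 2023-07-04 would land at
# ..\DarkFox\HTML_Pages\07042023\Description\<name>.html (illustrative name only);
# the doubled backslashes produced by the raw strings above are harmless on Windows.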


# Creates the file name from the passed URL; falls back to a global counter if nothing alphanumeric is left after cleaning
#@param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter += 1
    return name
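
# For instance, a (hypothetical) url like 'http://example.onion/product/abc-123'
# becomes 'httpexampleonionproductabc123'; only alphanumeric characters survive,
# giving each saved page a filesystem-safe, reproducible name.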


# Returns the list of urls of interest; the crawler runs through this list
#in this example, the links are a few product categories, such as
# Guides and Tutorials, Digital Products, and Software and Malware
def getInterestedLinks():
    links = []

    # # Guides and Tutorials
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/30739153-1fcd-45cd-b919-072b439c6e06')
    # # Digital Products
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/0e384d5f-26ef-4561-b5a3-ff76a88ab781')
    # Software and Malware
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/6b71210f-f1f9-4aa3-8f89-bd9ee28f7afc')
    links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/6b71210f-f1f9-4aa3-8f89-bd9ee28f7afc?page=15')
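    # ('?page=15' presumably resumes this category part-way; remove the query to start at page 1)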
    # # Services
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/b9dc5846-5024-421e-92e6-09ba96a03280')
    # # Miscellaneous
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/fd1c989b-1a74-4dc0-92b0-67d8c1c487cb')
    # # Hosting and Security
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/5233fd6a-72e6-466d-b108-5cc61091cd14')

    return links


# Gets the links of interest and iterates through the list, visiting and crawling each one
#listing and description pages are both crawled here, and both types of pages are saved
#@param: selenium driver
def crawlForum(driver):
    print("Crawling the DarkFox market")

    linksToCrawl = getInterestedLinks()
    # visited = set(linksToCrawl)
    # initialTime = time.time()
    count = 0

    i = 0
    while i < len(linksToCrawl):
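        # hard cap: stop visiting further links once ~500 product pages have been saved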
        if count >= 500:
            break
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except Exception:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                productLinks = productPages(html)
                for item in productLinks:
                    itemURL = str(item)
                    try:
                        driver.get(itemURL)
                    except Exception:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()
                    count += 1

                try:
                    # the second anchor in the pagination nav is treated as the "next page" link
                    link = driver.find_element(by=By.XPATH,
                        value='/html/body/main/div/div[2]/div/div[2]/div/div/div/nav/a[2]').get_attribute('href')
                    try:
                        driver.get(link)
                    except Exception:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print finalTime - initialTime

    input("Crawling BestCardingWorld forum done sucessfully. Press ENTER to continue\n")


# Returns 'True' if the link is a description link
#@param: url of any page crawled
#return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns True if the link is a listingPage link
#@param: url of any page crawled
#return: true if is a Listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False


# Calls the parser to extract product links from a listing page's html
#@param: html of a page reached from the interested link list, i.e. getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return darkfox_links_parser(soup)


# Drop links that "signout"
def isSignOut(url):
    #absURL = urlparse.urljoin(url.base_url, url.url)
    if 'signout' in url.lower() or 'logout' in url.lower():
        return True

    return False


def crawler():
    startCrawling()
    # print("Crawling and Parsing DarkFox .... DONE!")