__author__ = 'DarkWeb'

'''
Atlas Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os, time
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Atlas.parser import atlas_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion'

def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
def getMKTName() -> str:
    name = 'Atlas'
    return name


# Return the base link of the website
def getFixedURL():
    url = baseURL
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates a Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver
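

# Sanity-check sketch (illustrative only, never called by the pipeline):
# confirms that a createFFDriver() instance actually routes traffic through
# Tor. Assumes check.torproject.org is reachable and still reports
# 'Congratulations' when Tor is detected.
def _checkTorRouting(driver):
    driver.get('https://check.torproject.org')
    return 'Congratulations' in driver.page_source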


# The driver 'gets' the URL; if the site cannot be accessed, return 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        driver.close()
        return 'down'
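
# Typical usage (mirrors startCrawling): the sentinel string 'down' signals
# failure, anything else is a live driver:
#
#   driver = getAccess()
#   if driver != 'down':
#       crawlForum(driver)
#       closeDriver(driver)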

def agreeToTerms(driver):
    try:
        agree_button = driver.find_element(by=By.XPATH, value='//input[@type="submit" and @name="agree" and @value="Yes, I agree"]')
        agree_button.click()
    except Exception as e:
        print('Problem with clicking agree button', e)

def login(driver):
    # input("Press ENTER when CAPTCHA is complete and login page has loaded\n")
    input("Press ENTER when captcha is solved")
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="username"]')))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('atldark')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('a1T2l3dark')

    input("Press ENTER when captcha is solved and you're logged in")


def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
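
# For example, with a hypothetical shared_folder of '/data' and crawl date
# '2024-01-01', a listing page is written to:
#   /data/MarketPlaces/Atlas/HTML_Pages/2024-01-01/Listing/<fileName>.html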


def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
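
# Illustrative example: every non-alphanumeric character is stripped, so a
# hypothetical product URL flattens to a single file name:
#   getNameFromURL('http://example.onion/product/abc-123')
#   -> 'httpexampleonionproductabc123'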


def getInterestedLinks():
    links = []

    # hacking
    # links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/hacking')
    # fraud
    links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/fraud')
    # software
    links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/software')
    # services
    # links.append('http://5jbmfuip2ohxabshoapzy23kz36movs4clpivnsu7lnrn4osazhzdtqd.onion/c/services')

    return links


def crawlForum(driver):

    print("Crawling the Atlas market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)
                products = productPages(html)

                for item in products:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # testing: stop after the first product on each page;
                    # comment this out for a full crawl
                    break

                # testing: stop after the first listing page
                # if count == 1:
                #     break

                try:
                    # the last <a> in the pagination bar is the 'next page'
                    # arrow when more pages exist
                    pagination = driver.find_element(by=By.XPATH, value='/html/body/div[2]/form/section/div/div/div[2]/div[2]')
                    a_tag = pagination.find_elements(by=By.TAG_NAME, value='a')[-1]

                    # raises NoSuchElementException if the last link is not a
                    # 'next page' arrow, which ends the pagination loop below
                    a_tag.find_element(by=By.CLASS_NAME, value='double-arrow-right')
                    link = a_tag.get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the vortex market done.")


# Returns True if the link is a product description link, may need to change for every website
def isDescriptionLink(url):
    return 'product' in url


# Returns True if the link is a listing page link, may need to change for every website
def isListingLink(url):
    return '/c/' in url
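
# Illustrative examples with hypothetical URLs:
#   isDescriptionLink('http://example.onion/product/some-item')  -> True
#   isListingLink('http://example.onion/c/fraud')                -> True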


def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return atlas_links_parser(soup)
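
# Typical usage (as in crawlForum; assumes atlas_links_parser returns a list
# of href strings scraped from the listing page):
#
#   for href in productPages(driver.page_source):
#       itemURL = urlparse.urljoin(baseURL, str(href))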


def crawler():
    startCrawling()