__author__ = 'DarkWeb'

'''
Sonanza Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os, time
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Sonanza.parser import sonanza_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

# fallback counter used to name pages whose URL contains no alphanumeric characters
counter = 1
baseURL = 'http://sonanzazddbd3lqw2ai6uwmnb4fx7tj7h6hmdwkfqe7bnzgpuvkxqpyd.onion/'


def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
def getMKTName():
    name = 'Sonanza'
    return name


# Returns the base link of the website
def getFixedURL():
    url = 'http://sonanzazddbd3lqw2ai6uwmnb4fx7tj7h6hmdwkfqe7bnzgpuvkxqpyd.onion/'
    return url


# Closes Tor Browser
def closeDriver(driver):
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
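    # 9150 is the Tor Browser bundle's SOCKS port; a standalone tor daemon usually listens on 9050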
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver


# The driver 'gets' the url; if the site can't be accessed, return 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        driver.close()
        return 'down'


def login(driver):
    # input("Press ENTER when CAPTCHA is complete and login page has loaded\n")
    #
    # # entering username and password into input boxes
    # usernameBox = driver.find_element(by=By.XPATH, value='//input[@name="username"]')
    # # Username here
    # usernameBox.send_keys('aliciamykeys')
    # passwordBox = driver.find_element(by=By.XPATH, value='//input[@name="password"]')
    # # Password here
    # passwordBox.send_keys('aliciawherearemykey$')
    # # session time
    # session_select = Select(driver.find_element(by=By.XPATH, value='/html/body/main/div/div/div/div/div/form/div[4]/div/div[2]/select'))
    # session_select.select_by_visible_text('Session 60min')

    input("Press ENTER when CAPTCHA is completed and listing page loaded\n")

    # wait for listing page show up (This Xpath may need to change based on different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="searchbar"]')))


def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath


def getNameFromURL(url):
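    # keep only alphanumeric characters so the URL can serve as a file name
    # (e.g. the '/category/3' suffix contributes 'category3')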
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


def getInterestedLinks():
    links = []

    # Guides and Tutorials
    links.append('http://sonanzazddbd3lqw2ai6uwmnb4fx7tj7h6hmdwkfqe7bnzgpuvkxqpyd.onion/category/3')
    # Software and Malware
    links.append('http://sonanzazddbd3lqw2ai6uwmnb4fx7tj7h6hmdwkfqe7bnzgpuvkxqpyd.onion/category/4')
    # Fraud
    links.append('http://sonanzazddbd3lqw2ai6uwmnb4fx7tj7h6hmdwkfqe7bnzgpuvkxqpyd.onion/category/5')
    # Digital Products
    links.append('http://sonanzazddbd3lqw2ai6uwmnb4fx7tj7h6hmdwkfqe7bnzgpuvkxqpyd.onion/category/21')
    # Services
    links.append('http://sonanzazddbd3lqw2ai6uwmnb4fx7tj7h6hmdwkfqe7bnzgpuvkxqpyd.onion/category/22')

    return links


def crawlForum(driver):

    print("Crawling the Sonanza market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except Exception:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                listings = productPages(html)

                for item in listings:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except Exception:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                #     # uncomment to stop after the first product (testing)
                #     break
                #
                # # uncomment to stop after two pages (testing)
                # if count == 1:
                #     break

                try:
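                    # the anchor containing '›' is the next-page arrow in the pagination bar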
                    link = driver.find_element(by=By.XPATH, value='//a[contains(text(), "›")]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Sonanza market done.")


# Returns True if the link leads to a product description page; may need to change for every website
def isDescriptionLink(url):
    if 'article' in url:
        return True
    return False


# Returns True if the link leads to a listing page; may need to change for every website
def isListingLink(url):
    if 'category' in url:
        return True
    return False


def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return sonanza_links_parser(soup)
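
# For reference: sonanza_links_parser (imported above) is expected to return the
# product links found on a listing page. A minimal sketch of that contract,
# assuming listings point at '/article/<id>' description pages as
# isDescriptionLink() implies, might look like:
#
#   def sonanza_links_parser(soup):
#       return [a['href'] for a in soup.find_all('a', href=True)
#               if 'article' in a['href']]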


def crawler():
    startCrawling()