__author__ = 'DarkWeb'

'''
Bohemia Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Bohemia.parser import bohemia_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://bohemiaobko4cecexkj5xmlaove6yn726dstp5wfw4pojjwp6762paqd.onion/'

# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it.
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            captcha(driver)
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, False)


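# Logs into the marketplace: clicks the login confirmation, enters the account credentials,
# selects the session duration, and waits for the user to solve the login CAPTCHA manually
#@param: current selenium driver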
def login(driver):
    #wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div[4]/div/div/form/input[1]")))

    #click on login page confirmation
    driver.find_element(by=By.XPATH, value="/html/body/div/div[4]/div/div/form/input[1]").click()

    #wait until next page shows up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div[2]/div/div[2]/div/div[2]/form/div[1]/input")))

    #entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/div[2]/div/div[2]/form/div[1]/input')
    #username here
    usernameBox.send_keys('ct-1234')
    passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/div[2]/div/div[2]/form/div[2]/input')
    #password here
    passwordBox.send_keys('DementedBed123-')
    #session time
    session_select = Select(driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/div[2]/div/div[2]/form/div[3]/select'))
    session_select.select_by_visible_text('300 Minutes')

    '''
    #wait for captcha page to show up
    inputBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/div[2]/div/div[2]/form/div[4]/div/input')

    #save captcha to local
    driver.find_element(by=By.XPATH, value='//*[@id="captcha"]').screenshot(r'..\Bohemia\captcha2.png')

    im = Image.open(r'..\Bohemia\captcha2.png')
    im.show()

    #ask user input captcha solution in terminal
    userIn = input("Enter Solution: ")

    #send user solution into input field
    inputBox.send_keys(userIn)

    #click the submit button
    driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/div[2]/div/div[2]/form/div[5]/button').click()
    '''
    input("Press ENTER when CAPTCHA is completed\n")

    #wait for listing page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div[2]/div[1]/div")))

# Returns the name of the website
#return: name of the site as a string
def getMKTName():
    name = 'Bohemia'
    return name


# Returns credentials needed for the mkt
def getCredentials():
    credentials = 'blank blank blank blank cap 0'
    return credentials


# Returns the base link of the website
#return: url of the base site as a string
def getFixedURL():
    url = 'http://bohemiaobko4cecexkj5xmlaove6yn726dstp5wfw4pojjwp6762paqd.onion/'
    return url


# Closes Tor Browser
#@param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates the Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    
    driver.maximize_window()

    return driver
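
# Note: passing firefox_binary/firefox_profile directly to webdriver.Firefox() works on the
# Selenium 4 versions this project was written against, but those keyword arguments were
# deprecated and later removed. A minimal sketch of the newer style, assuming Selenium >= 4.10
# (the preference calls above stay the same):
#
#   from selenium.webdriver.firefox.options import Options
#   ff_options = Options()
#   ff_options.binary_location = config.get('TOR', 'firefox_binary_path')
#   ff_options.profile = ff_prof
#   driver = webdriver.Firefox(service=service, options=ff_options)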

# The driver 'gets' the url, attempting to access the site; if it can't, it returns 'down'
#return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Manual captcha solver: waits for a specific element so that the whole page loads, finds the input box,
# takes a screenshot of the captcha, then allows for manual solving of the captcha in the terminal
#@param: current selenium web driver
def captcha(driver):
    '''
    # wait for captcha page show up (for bohemia it takes A WHILE)
    print("Connecting Bohemia...")
    time.sleep(7.5)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located((By.XPATH, "/html/body/div/div/form/div")))
    input('Bohemia Connected. Press ENTER to continue\n')

    # save captcha to local
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/div/div").screenshot(r'..\Bohemia\captcha.png')

    # open method used to open different extension image file
    im = Image.open(r'..\Bohemia\captcha.png')

    # This method will show image in any image viewer
    im.show()

    # Prints link to console since captcha requires the link
    print(getFixedURL())

    # wait until input space show up
    inputBox = driver.find_element(by=By.XPATH, value="/html/body/div/div/form/div/div/input")

    # ask user to input the captcha solution in terminal
    userIn = input("Enter solution: ")

    # send user solution into the input space
    inputBox.send_keys(userIn)

    # click the verify(submit) button
    driver.find_element(by=By.XPATH, value='/html/body/div/div/form/button[1]').click()

    # im.close()
    '''
    input("Press ENTER when CAPTCHA is completed\n")

    # wait for next captcha to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div/form")))

    '''
    for square in range(1,7):

        inputBox = driver.find_element(by=By.XPATH, value=f"/html/body/div/div/form/div[1]/input[{square}]")
        inputBox.click()
        time.sleep(.5)
        # userIn = input("Enter Solution: ")
        # inputBox.send_keys(userIn)

        # Takes screenshot every iteration because after input the captcha changes
        driver.find_element(by=By.XPATH, value="/html/body/div/div/form").screenshot(r'..\Bohemia\captcha1.png')

        # Opens and crops image
        im = Image.open(r'..\Bohemia\captcha1.png')
        im = im.crop(((im.width // 2 - 80), (im.height // 2 - 100), (im.width // 2 + 80), (im.height // 2 + 60)))
        im.show()
        # im.close()

        userIn = input("Enter Solution: ")
        inputBox.send_keys(userIn)

    #locate and press submit button
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/button[1]").click()
    # driver.find_element(by=By.XPATH, value='/html/body/div/div/form/button[2]')
    '''

    input("Press ENTER when CAPTCHA is completed\n")

    #wait for next page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div[4]/div/div/form/input[1]")))

# Saves the crawled html page and creates the directory path for html pages if it does not exist
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
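
# Example (hypothetical config values): with shared_folder = 'C:/Crawlers/shared' and
# CURRENT_DATE = '01252023', a listing-page URL would be saved to
# 'C:/Crawlers/shared/MarketPlaces/Bohemia/HTML_Pages/01252023/Listing/<fileName>.html'.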


# Creates the file name from the passed URL; falls back to a counter-based name if nothing is left after cleaning
#@param: raw url as the crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if (name == ''):
        name = str(counter)
        counter = counter + 1
    return name
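
# Example (hypothetical URL): getNameFromURL('http://example.onion/listings?page=1')
# returns 'httpexampleonionlistingspage1'; an empty result falls back to the global counter.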

# Returns the list of urls of interest; the crawler runs through this list.
# In this example, the links cover product categories such as
# Malware and Botnets, Exploits, Methods, Exploit Kits, and Hacking Software.
def getInterestedLinks():
    links = []

    # Malware and Botnets
    links.append('http://bohemiaobko4cecexkj5xmlaove6yn726dstp5wfw4pojjwp6762paqd.onion/listings?page=1&type=all&catid=95')
    # #Exploits
    # links.append('http://bohemiaobko4cecexkj5xmlaove6yn726dstp5wfw4pojjwp6762paqd.onion/listings?page=1&type=all&catid=99')
    # #Methods
    # links.append('http://bohemiaobko4cecexkj5xmlaove6yn726dstp5wfw4pojjwp6762paqd.onion/listings?catid=100')
    # #Exploit kits
    # links.append('http://bohemiaobko4cecexkj5xmlaove6yn726dstp5wfw4pojjwp6762paqd.onion/listings?catid=101')
    # #Hacking Software
    # links.append('http://bohemiaobko4cecexkj5xmlaove6yn726dstp5wfw4pojjwp6762paqd.onion/listings?catid=103')


    return links

# Gets the links of interest to crawl through and iterates through the list; each link is visited and crawled.
# Listing and description pages are crawled here, and both types of pages are saved.
#@param: selenium driver
def crawlForum(driver):
    print("Crawling the Bohemia Market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out this break to crawl every product on the page (currently stops after the first)
                    break

                # comment out this check to crawl every page (currently stops after two pages)
                if count == 1:
                    break

                try:
                    nav = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div[2]/ul')
                    a = nav.find_element(by=By.PARTIAL_LINK_TEXT, value="Next")
                    link = a.get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    input("Crawling Bohemia Market done sucessfully. Press ENTER to continue\n")


# Returns 'True' if the link is a description link
#@param: url of any page crawled
#return: true if it is a description page, false if not
def isDescriptionLink(url):
    if bool(re.search(r'\blisting\b',url)): # accurate with bohemia
        return True
    return False


# Returns 'True' if the link is a listing page link
#@param: url of any page crawled
#return: true if it is a listing page, false if not
def isListingLink(url):
    if bool(re.search(r'\blistings\b',url)): # accurate with bohemia
        return True
    return False
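
# Example (assumed URL shapes): a product page such as '.../listing?lid=12345' matches the
# \blisting\b check above, while a category page such as '.../listings?page=1&catid=95'
# matches \blistings\b; the word boundary keeps 'listing' from matching inside 'listings'.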


# Calls the parser to extract the product links; the html is the page source of a listing page
#@param: html source of a page from the interested link list, i.e. getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return bohemia_links_parser(soup)


# Drop links that "signout"
def isSignOut(url):
    #absURL = urlparse.urljoin(url.base_url, url.url)
    if 'signout' in url.lower() or 'logout' in url.lower():
        return True

    return False


def crawler():
    startCrawling()
    # print("Crawling and Parsing BestCardingWorld .... DONE!")