__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
import re  # used below for the CVE/MS regex searches (may also arrive via the wildcard import above)


# This is the method to parse the Description Pages (one page for each Product in the Listing Pages)
def wethenorth_description_parser(soup):
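    """
    Parses a WeTheNorth product Description page.

    :param soup: BeautifulSoup object of the Description page HTML
    :return: tuple of all scraped fields in the order expected downstream;
             fields the page does not provide remain "-1"
    """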

    # Fields to be parsed

    vendor = "-1"                       # 0 *Vendor_Name
    success = "-1"                      # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"                # 2 Vendor_Rating
    name = "-1"                         # 3 *Product_Name
    describe = "-1"                     # 4 Product_Description
    CVE = "-1"                          # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                           # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"                     # 7 Product_Category
    views = "-1"                        # 8 Product_Number_Of_Views
    reviews = "-1"                      # 9 Product_Number_Of_Reviews
    rating_item = "-1"                  # 10 Product_Rating
    addDate = "-1"                      # 11 Product_AddedDate
    BTC = "-1"                          # 12 Product_BTC_SellingPrice
    USD = "-1"                          # 13 Product_USD_SellingPrice
    EURO = "-1"                         # 14 Product_EURO_SellingPrice
    sold = "-1"                         # 15 Product_QuantitySold
    left = "-1"                         # 16 Product_QuantityLeft
    shipFrom = "-1"                     # 17 Product_ShippedFrom
    shipTo = "-1"                       # 18 Product_ShippedTo
    image = "-1"                        # 19 Product_Image
    vendor_image = "-1"                 # 20 Vendor_Image

    # Finding Product Name
    listDes = soup.find('div', {'class': "listDes"})
    name = listDes.find('h2').text
    name = cleanString(name)
    name = name.strip()

    # Finding Vendor
    vendor = listDes.find('b').text
    vendor = cleanString(vendor)
    vendor = vendor.strip()

    # Finding Vendor Rating
    rating = listDes.find('span', {'class': 'levelSet'})
    rating = rating.text
    rating = cleanNumbers(rating)
    rating_vendor = rating.strip()

    # Finding Prices - all prices on We The North are in CAD; the 'CAD' tag is kept in
    # the resulting string so the currency stays visible for every price
    padp = listDes.find('p', {'class': 'padp'})
    USD = padp.find('span').text
    USD = USD.strip()

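    # Finding BTC - the BTC price sits in the paragraph that follows the CAD price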
    BTC = padp.find_next_sibling('p').text
    BTC = cleanNumbers(BTC)
    BTC = BTC.strip()

    # Finding Escrow - no escrow on WTN market

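    # Finding Shipment Information - digital products have no shipping rows, so the check below skips them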
    shipping_info = listDes.find('tbody')
    if "Digital" not in shipping_info.text:
        shipping_info = shipping_info.find_all('tr')
        row1 = shipping_info[0].find_all('td')

        # Finding Shipment Information (Origin)
        shipFrom = row1[-1].text
        shipFrom = cleanString(shipFrom)
        shipFrom = shipFrom.strip()
        if shipFrom == "":
            shipFrom = "-1"

        row2 = shipping_info[1].find_all('td')

        # Finding Shipment Information (Destination)
        shipTo = row2[-1].text
        shipTo = cleanString(shipTo)
        shipTo = shipTo.strip()
        if shipTo == "":
            shipTo = "-1"

    # Finding the Product description
    describe = soup.find("div", {'class': 'tabcontent'})
    describe = describe.find('p').text
    describe = cleanString(describe)
    describe = describe.strip()

    # Searching for CVE and MS categories
    # no CVE or MS for WTN market

    # Populating the final variable (a tuple with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row


# This is the method to parse the Listing Pages
def wethenorth_listing_parser(soup):
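    """
    Parses a WeTheNorth Listing page (multiple Products per page).

    :param soup: BeautifulSoup object of the Listing page HTML
    :return: the output of organizeProducts for all products found on the page
    """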

    # Fields to be parsed
    nm = 0                                    # *Total_Products (Should be Integer)
    mktName = "WeTheNorth"                    # 0 *Marketplace_Name
    vendor = []                               # 1 *Vendor y
    rating_vendor = []                        # 2 Vendor_Rating
    success = []                              # 3 Vendor_Successful_Transactions
    name = []                                 # 4 *Product_Name y
    CVE = []                                  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                                   # 6 Product_MS_Classification (Microsoft Security)
    category = []                             # 7 Product_Category y
    describe = []                             # 8 Product_Description
    views = []                                # 9 Product_Number_Of_Views
    reviews = []                              # 10 Product_Number_Of_Reviews
    rating_item = []                          # 11 Product_Rating
    addDate = []                              # 12 Product_AddDate
    BTC = []                                  # 13 Product_BTC_SellingPrice
    USD = []                                  # 14 Product_USD_SellingPrice y
    EURO = []                                 # 15 Product_EURO_SellingPrice
    sold = []                                 # 16 Product_QuantitySold
    qLeft = []                                # 17 Product_QuantityLeft
    shipFrom = []                             # 18 Product_ShippedFrom
    shipTo = []                               # 19 Product_ShippedTo
    image = []                                # 20 Product_Image
    image_vendor = []                         # 21 Vendor_Image
    href = []                                 # 22 Product_Links

    right_content = soup.find('div', {"class": "right-content"})
    listing = right_content.findAll('div', {"class": "col-1search"})
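    # Skip the first three results - as noted in wethenorth_links_parser below,
    # these are highlighted products usually unrelated to the listing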
    listing = listing[3:]

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

        # Finding the Vendor
        vendor_name = a.find('p', {'class': 'padp'})
        vendor_name = vendor_name.find('a').text
        vendor_name = cleanString(vendor_name)
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the Product
        product = a.find('div', {'class': 'col-1centre'})
        product = product.find('div', {'class': 'head'}).find('a').text
        product = cleanString(product)
        product = product.strip()
        name.append(product)

        # Finding the Category - the 'padp' line appears to have the form
        # '<vendor> - <category> - ...', so take the text between the first two dashes
        category_name = a.find('p', {'class': 'padp'}).text
        first_dash = category_name.find('-')
        second_dash = category_name[first_dash + 1:].find('-')
        category_name = category_name[first_dash + 1: first_dash + 1 + second_dash]
        category_name = cleanString(category_name)
        category_name = category_name.strip()
        category.append(category_name)

        # Finding Successful Transactions - the count appears in parentheses in the 'padp' line
        vendor_success = a.find('p', {'class': 'padp'}).text
        open_paren = vendor_success.find('(')
        vendor_success = vendor_success[open_paren + 1:]
        vendor_success = cleanNumbers(vendor_success)
        vendor_success = vendor_success.strip()
        success.append(vendor_success)

        # Finding Views
        view_count = a.text
        view_count = view_count[view_count.find('Views:'): view_count.find('Sales:')]
        view_count = view_count.replace('Views:', ' ')
        view_count = cleanNumbers(view_count)
        view_count = view_count.strip()
        views.append(view_count)

        # Finding Quantity Sold
        sold_count = a.text
        sold_count = sold_count[sold_count.find('Sales:'): sold_count.find('Short')]
        sold_count = sold_count.replace('Sales:', ' ')
        sold_count = cleanNumbers(sold_count)
        sold_count = sold_count.strip()
        sold.append(sold_count)

        right = a.find('div', {'class': 'col-1right'})

        # Finding the Price - WTN lists all prices in CAD; the 'CAD ' prefix keeps the
        # currency explicit even though the field is named USD
        usd = right.find('a').text
        usd = "CAD " + usd.strip()
        USD.append(usd)

        # Finding BTC - the BTC amount is shown in parentheses after the CAD price
        btc = right.text
        open_paren = btc.find('(')
        close_paren = btc[open_paren + 1:].find(')')
        btc = btc[open_paren + 1: open_paren + 1 + close_paren]
        btc = cleanNumbers(btc)
        btc = btc.strip()
        BTC.append(btc)

        # Finding Product Image
        product_image = right.find('img')
        product_image = product_image.get('src')
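        # Strip the data-URI prefix (everything up to 'base64,') and keep only the encoded payload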
        product_image = product_image.split('base64,')[-1]
        image.append(product_image)

        # Searching for CVE and MS categories
        # WTN does not list CVE or MS identifiers, so these normally stay "-1"
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += idx
                cee += "  "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += im
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (organizeProducts combines the per-field lists scraped above)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image, image_vendor)


def wethenorth_links_parser(soup):
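    """
    Collects the link to each Product page found on a Listing page.

    :param soup: BeautifulSoup object of the Listing page HTML
    :return: list of Product page URLs for the Crawler to visit
    """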

    # Returning all links that should be visited by the Crawler
    href = []
    right_content = soup.find('div', {"class": "right-content"})
    listing = right_content.findAll('div', {"class": "col-1search"})
    # Skip the first three products on each page - they are highlighted in blue and usually unrelated
    listing = listing[3:]
    for a in listing:

        link = a.find('a')
        link = link['href']
        href.append(link)

    return href
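

# Minimal usage sketch (not part of the original module): it assumes a locally saved
# HTML copy of a WeTheNorth listing page at a hypothetical path and prints the product
# links the Crawler would visit.
if __name__ == '__main__':
    with open('wethenorth_listing.html', 'r', encoding='utf-8') as f:  # hypothetical file
        page = BeautifulSoup(f.read(), 'html.parser')
    print(wethenorth_links_parser(page))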