__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# parses description pages: takes the HTML of a description page as a soup object and extracts the info it needs
# each field is stored in its own variable; the variables are returned together as one tuple
# @param: soup object of a description page's HTML
# @return: 'row', a tuple holding every field scraped from the description page
def darkmatter_description_parser(soup):

    # Fields to be parsed

    vendor = "-1"  # 0 *Vendor_Name
    success = "-1"  # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"  # 2 Vendor_Rating
    name = "-1"  # 3 *Product_Name
    describe = "-1"  # 4 Product_Description
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"  # 7 Product_Category
    views = "-1"  # 8 Product_Number_Of_Views
    reviews = "-1"  # 9 Product_Number_Of_Reviews
    rating_item = "-1"  # 10 Product_Rating
    addDate = "-1"  # 11 Product_AddedDate
    BTC = "-1"  # 12 Product_BTC_SellingPrice
    USD = "-1"  # 13 Product_USD_SellingPrice
    EURO = "-1"  # 14 Product_EURO_SellingPrice
    sold = "-1"  # 15 Product_QuantitySold
    left = "-1"  # 16 Product_QuantityLeft
    shipFrom = "-1"  # 17 Product_ShippedFrom
    shipTo = "-1"  # 18 Product_ShippedTo

    # 0 *Vendor_Name
    # the vendor name sits in the 'vtable' summary table; its row index varies between layouts,
    # so fall back from row 3 to row 4
    try:
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[3].find('a').text
        vendor = cleanString(temp2.strip())
    except:
        try:
            temp = soup.find('table', {'class': 'vtable'})
            temp = temp.findAll('tr')
            temp2 = temp[4].find('a').text
            vendor = cleanString(temp2.strip())
        except:
            print("vendor")

    # product name
    try:
        name = soup.find('div', {'class': 'title-h2'}).text
        name = cleanString(name.strip())
    except:
        print("name")

    # product description
    try:
        temp = soup.find('pre', {'class': 'description'}).text
        temp = temp.replace('\n', ' ')
        describe = cleanString(temp.strip())
    except:
        print("description")

    # product category
    try:
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[4].find('th').text
        temp2 = cleanString(temp2)
        if (temp2 == "Category"):
            temp2 = temp[4].find('a').text
            category = cleanString(temp2.strip())
    except:
        try:
            temp = soup.find('table', {'class': 'vtable'})
            temp = temp.findAll('tr')
            temp2 = temp[5].find('th').text
            temp2 = cleanString(temp2.strip())
            if (temp2 == "Category"):
                temp2 = temp[5].find('a').text
                category = cleanString(temp2.strip())
        except:
            print('category')

    # 13 Product_USD_SellingPrice
    try:
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[1].find('td').text
        temp2 = temp2.replace(' USD', '')
        USD = cleanString(temp2)
    except:
        print('USD')

    # 15 Product_QuantitySold
    try:
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[5].find('th').text
        temp2 = cleanString(temp2)
        temp3 = temp[6].find('th').text
        temp3 = cleanString(temp3)
        if (temp2 == "Sold"):
            temp2 = temp[5].find('td').text
            sold = cleanString(temp2.strip())
        elif (temp3 == "Sold"):
            temp2 = temp[6].find('td').text
            sold = cleanString(temp2.strip())
    except:
        print('sold')


    # Populating the final variable (a tuple holding every scraped field, in the order expected downstream)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row
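

# The description parser above locates 'vtable' rows by hard-coded index and falls back when the
# layout shifts. A label-based lookup is one alternative; the helper below is a minimal sketch of
# that idea (its name and use are illustrative, not part of the original parsing flow), assuming
# each vtable row pairs one <th> label with one <td>/<a> value.
def find_vtable_value(soup, label):
    table = soup.find('table', {'class': 'vtable'})
    if table is None:
        return None
    for row in table.findAll('tr'):
        th = row.find('th')
        # match the label with or without a trailing colon ("Sold" vs "Sold:")
        if th is not None and cleanString(th.text).strip().rstrip(':') == label:
            cell = row.find('a') or row.find('td')
            if cell is not None:
                return cleanString(cell.text.strip())
    return None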


# parses listing pages: takes the HTML of a listing page as a soup object and extracts the info it needs
# each field is collected in its own list; the parallel per-product lists are organized and returned together
# @param: soup object of a listing page's HTML
# @return: 'row', the organized data for every product on the listing page
def darkmatter_listing_parser(soup):

    # Fields to be parsed
    nm = 0                                    # Total_Products (Should be Integer)
    mktName = "DarkMatter"                    # 0 Marketplace_Name
    name = []                                 # 1 Product_Name
    CVE = []                                  # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                                   # 3 Product_MS_Classification (Microsoft Security)
    category = []                             # 4 Product_Category
    describe = []                             # 5 Product_Description
    escrow = []                               # 6 Vendor_Warranty
    views = []                                # 7 Product_Number_Of_Views
    reviews = []                              # 8 Product_Number_Of_Reviews
    addDate = []                              # 9 Product_AddDate
    rating_item = []                          # 10 Product_Rating
    lastSeen = []                             # 11 Product_LastViewDate
    BTC = []                                  # 12 Product_BTC_SellingPrice
    USD = []                                  # 13 Product_USD_SellingPrice
    EURO = []                                 # 14 Product_EURO_SellingPrice
    sold = []                                 # 15 Product_QuantitySold
    qLeft = []                                # 16 Product_QuantityLeft
    shipFrom = []                             # 17 Product_ShippedFrom
    shipTo = []                               # 18 Product_ShippedTo
    vendor = []                               # 19 Vendor
    rating = []                               # 20 Vendor_Rating
    success = []                              # 21 Vendor_Successful_Transactions
    href = []                                 # 22 Product_Links (Urls)

    # three parallel collections, one per product: name cells, detail tables, and price cells
    names = soup.find('div', {"class": "content"}).findAll('td', {"class": "lefted", "colspan": "3"})
    left = soup.find('div', {"class": "content"}).findAll('table', {"class": "vtable"})
    right = soup.find('div', {"class": "content"}).findAll('td', {"class": "vtop centered"})

    count = 0
    # Populating the Number of Products
    nm = len(names)

    for a in names:
        # product name (strip a leading "pcs x " / "pks x " quantity prefix when present)
        try:
            temp = a.find('a').text
            if ("pcs x " in temp):
                index = temp.index("pcs x ")
                result = temp[index + len("pcs x "):]
                name.append(cleanString(result))
            elif ("pks x " in temp):
                index = temp.index("pks x ")
                result = temp[index + len("pks x "):]
                name.append(cleanString(result))
            else:
                # append the full title so the parallel lists stay aligned
                name.append(cleanString(temp.strip()))
        except Exception as e:
            print("product name", e)

        CVE.append("-1")
        MS.append("-1")

        temp2 = left[count].findAll('tr')

        # index of the last detail row, which holds either the "Sold:" or the "Ship To:" field
        length_2 = len(temp2) - 1

        # category
        try:
            temp = temp2[1].find('td').text
            category.append(cleanString(temp.strip()))
        except:
            print('category')

        describe.append("-1")
        escrow.append("-1")
        views.append("-1")
        reviews.append("-1")
        addDate.append("-1")
        lastSeen.append("-1")
        BTC.append("-1")

        # usd (the 'vtop centered' cells come in pairs per product, hence the count * 2 index)
        try:
            temp3 = right[count*2].find('span').text
            temp = temp3.replace(' USD', '')
            USD.append(cleanString(temp))
        except:
            print('USD')

        EURO.append("-1")

        # 15 Product_QuantitySold
        try:
            temp3 = temp2[length_2].find('th').text
            temp3 = cleanString(temp3)
            if (temp3 == "Sold:"):
                temp = temp2[length_2].find('td').text
                sold.append(cleanString(temp.strip()))
            else:
                sold.append("-1")
        except Exception as e:
            sold.append("-1")
            print('sold', e)

        qLeft.append("-1")
        shipFrom.append("-1")

        # ship to
        try:
            temp3 = temp2[length_2].find('th').text
            temp3 = cleanString(temp3)
            if (temp3 == "Ship To:"):
                temp = temp2[length_2].find('td').text
                shipTo.append(cleanString(temp.strip()))
            else:
                shipTo.append("-1")
        except Exception as e:
            shipTo.append("-1")
            print('shipto', e)

        # vendor
        try:
            temp = temp2[0].find('a').text
            vendor.append(cleanString(temp.strip()))
        except:
            print('vendor')

        rating.append("-1")
        success.append("-1")

        try:
            temp = a.find('a').get('href')
            href.append(temp)
        except:
            print('href')

        rating_item.append("-1")

        count += 1

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
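

# Every per-product branch in the listing parser must append exactly once, or the parallel lists
# drift out of alignment before organizeProducts is called. The helper below is a small defensive
# sketch (illustrative only; the original flow does not call it): pass nm and the per-product
# lists, and it reports whether they all line up.
def check_parallel_lists(nm, *fields):
    return all(len(f) == nm for f in fields)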


# called by the crawler to get description links on a listing page
# @param: soup object of a listing page's HTML
# @return: list of description links found on the listing page
def darkmatter_links_parser(soup):

    # Returning all links that should be visited by the Crawler

    href = []
    listing = soup.find('div', {"class": "content"}).findAll('td', {"class": "lefted", 'colspan': '3'})

    for a in listing:
        bae = a.find('a', href=True)
        # skip cells without an anchor instead of raising on a missing href
        if bae is not None:
            href.append(bae['href'])

    return href
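

# A minimal local smoke test, assuming saved HTML snapshots of DarkMatter pages; the file names
# below are placeholders for wherever the crawler stores its pages, not part of the crawler itself.
if __name__ == '__main__':
    for path, parser in [('darkmatter_listing.html', darkmatter_listing_parser),
                         ('darkmatter_description.html', darkmatter_description_parser)]:
        with open(path, 'r', encoding='utf-8') as f:
            page = BeautifulSoup(f.read(), 'html.parser')
        print(parser(page))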