__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# Parses a product description page: takes the BeautifulSoup object of a
# description page's HTML and extracts the product/vendor info it needs.
# @param: soup object looking at the html page of a description page
# @return: 'row' tuple of scraped fields describing the product ("-1" = not found)
def torbay_description_parser(soup):
    """Parse a TorBay product description page.

    @param soup: BeautifulSoup object of the description page's HTML
    @return: tuple of 19 string fields; any field that could not be
             scraped from the page is left as the sentinel "-1"
    """

    # Fields to be parsed ("-1" marks a field not found on the page)
    vendor = "-1"  # 0 *Vendor_Name
    success = "-1"  # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"  # 2 Vendor_Rating
    name = "-1"  # 3 *Product_Name
    describe = "-1"  # 4 Product_Description
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about that much
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security) dont worry about that much
    category = "-1"  # 7 Product_Category
    views = "-1"  # 8 Product_Number_Of_Views
    reviews = "-1"  # 9 Product_Number_Of_Reviews
    rating_item = "-1"  # 10 Product_Rating
    addDate = "-1"  # 11 Product_AddedDate
    BTC = "-1"  # 12 Product_BTC_SellingPrice
    USD = "-1"  # 13 Product_USD_SellingPrice
    EURO = "-1"  # 14 Product_EURO_SellingPrice
    sold = "-1"  # 15 Product_QuantitySold
    left = "-1"  # 16 Product_QuantityLeft
    shipFrom = "-1"  # 17 Product_ShippedFrom
    shipTo = "-1"  # 18 Product_ShippedTo

    # Finding Product Name.
    # A failed lookup in these chains raises AttributeError (find() returned
    # None), so catch only that instead of a bare except that would also
    # swallow KeyboardInterrupt/SystemExit.
    try:
        product_name = soup.find('div', {'class': 'product-information'}).find('h1').text
        name = cleanString(product_name.strip())
    except AttributeError:
        # Fall back to the profile header when the product block is absent
        try:
            product_name = soup.find('div', {'class': 'profile-info'}).find('h2').text
            name = cleanString(product_name.strip())
        except AttributeError:
            print("product name")

    # Finding Vendor Name
    try:
        vendor_name = soup.find('div', {"class": "profile-info"}).find('h2').text
        vendor = cleanString(vendor_name.strip())
    except AttributeError:
        print("description vendor name failed\n")

    # Finding Prices (only a USD price is shown on this market)
    try:
        USD = soup.find('div', {'class': "total-price"}).find('span').text.strip()
    except AttributeError:
        print("description price failed\n")

    # Finding the Product Category
    try:
        cat = soup.find('div', {'class': "profile-info"}).find('p').text
        category = cleanString(cat.strip())
    except AttributeError:
        print("description product category failed")

    # Finding the Product description; line breaks are flattened to spaces
    # (replace() is a no-op when the character is absent, so no guard needed)
    try:
        describe = soup.find('div', {'class': "info"}).find('p').text
        describe = describe.replace("\n", " ").replace("\r", " ")
        describe = cleanString(describe.strip())
    except AttributeError:
        # Fall back to the full text of the info block when it has no <p>
        try:
            describe = soup.find('div', {'class': 'info'}).text
            describe = cleanString(describe.strip())
        except AttributeError:
            print("Product description")

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row


# Parses a listing page: takes the BeautifulSoup object of a listing page's
# HTML and collects per-product info into parallel lists, which are then
# organized into the returned row.
# @param: soup object looking at the html page of a listing page
# @return: 'row' containing the per-product lists scraped from the listing page
def torbay_listing_parser(soup):
    """Parse a TorBay listing page into parallel per-product lists.

    @param soup: BeautifulSoup object of the listing page's HTML
    @return: result of organizeProducts(...) built from the per-product
             lists; fields not present on listing cards are filled with "-1"
    """

    # Fields to be parsed (one entry per product card; "-1" = not available)
    nm = 0  # *Total_Products (Should be Integer)
    mktName = "TorBay"  # 0 *Marketplace_Name
    vendor = []  # 1 *Vendor y
    rating_vendor = []  # 2 Vendor_Rating
    success = []  # 3 Vendor_Successful_Transactions
    name = []  # 4 *Product_Name y
    CVE = []  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this
    MS = []  # 6 Product_MS_Classification (Microsoft Security) dont worry about this
    category = []  # 7 Product_Category y
    describe = []  # 8 Product_Description
    views = []  # 9 Product_Number_Of_Views
    reviews = []  # 10 Product_Number_Of_Reviews
    rating_item = []  # 11 Product_Rating
    addDate = []  # 12 Product_AddDate
    BTC = []  # 13 Product_BTC_SellingPrice
    USD = []  # 14 Product_USD_SellingPrice y
    EURO = []  # 15 Product_EURO_SellingPrice
    sold = []  # 16 Product_QuantitySold
    qLeft = []  # 17 Product_QuantityLeft
    shipFrom = []  # 18 Product_ShippedFrom
    shipTo = []  # 19 Product_ShippedTo
    href = []  # 20 Product_Links

    listing = soup.findAll('div', {"class": "product-card"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:

        # Each lookup chain raises AttributeError when an element is missing
        # (find() returned None); catch only that so real errors still surface.
        try:
            product_name = a.find('p', {'class': 'name'}).text
            name.append(cleanString(product_name.strip()))
        except AttributeError:
            print("product name")

        try:
            prod = a.find('p', {'class': 'price'}).text  # price
            USD.append(cleanString(prod.strip()))
        except AttributeError:
            print("USD")

        try:
            ven = a.find('div', {'class': 'pc-footer'}).find('div').find('a').text  # pc-footer
            vendor.append(cleanString(ven.strip()))
        except AttributeError:
            print("vendor")

        try:
            h = a.find('p', {'class': 'name'}).find('a').get('href')
            href.append(h)
        except AttributeError:
            print("in href")

        # Fields not shown on listing cards are padded with the "-1" sentinel
        # so all lists stay the same length for organizeProducts
        CVE.append("-1")
        MS.append("-1")
        rating_vendor.append("-1")
        success.append("-1")
        describe.append("-1")
        views.append("-1")
        reviews.append("-1")
        rating_item.append("-1")
        addDate.append("-1")
        BTC.append("-1")
        EURO.append("-1")
        sold.append("-1")
        qLeft.append("-1")
        shipFrom.append("-1")
        shipTo.append("-1")
        category.append("Hacking")  # whole market is hacking-focused

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
# Called by the crawler to collect the description-page links on a listing page
# @param: BeautifulSoup object looking at the html of a listing page
# @return: list of description-page links found on the listing page
def torbay_links_parser(soup):
    """Collect every description-page link on a TorBay listing page.

    @param soup: BeautifulSoup object of the listing page's HTML
    @return: list of href strings, one per product card, for the crawler to visit
    """
    content = soup.find('section', {"id": "content"})
    cards = content.findAll('div', {"class": "product-card"})

    # Each card's footer holds a single "btn btn-primary" anchor whose
    # href points at the product's description page.
    return [
        card.find('div', {"class": "pc-footer"}).find('a', {"class": "btn btn-primary"}, href=True)['href']
        for card in cards
    ]