__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


#parses description pages, so takes html pages of description pages using soup object, and parses it for info it needs
#stores info it needs in different lists, these lists are returned after being organized
#@param: soup object looking at html page of description page
#return: 'row' that contains a variety of lists that each hold info on the description page
def darkmatter_description_parser(soup):
    """Parse a DarkMatter product-description page.

    @param soup: BeautifulSoup object of the description page HTML
    @return: tuple of 21 scraped fields in the fixed order expected by the
             pipeline; any field that could not be scraped stays "-1"
    """

    # Fields to be parsed ("-1" means "not found / not scraped")

    vendor = "-1"  # 0 *Vendor_Name
    success = "-1"  # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"  # 2 Vendor_Rating
    name = "-1"  # 3 *Product_Name
    describe = "-1"  # 4 Product_Description
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"  # 7 Product_Category
    views = "-1"  # 8 Product_Number_Of_Views
    reviews = "-1"  # 9 Product_Number_Of_Reviews
    rating_item = "-1"  # 10 Product_Rating
    addDate = "-1"  # 11 Product_AddedDate
    BTC = "-1"  # 12 Product_BTC_SellingPrice
    USD = "-1"  # 13 Product_USD_SellingPrice
    EURO = "-1"  # 14 Product_EURO_SellingPrice
    sold = "-1"  # 15 Product_QuantitySold
    left = "-1"  # 16 Product_QuantityLeft
    shipFrom = "-1"  # 17 Product_ShippedFrom
    shipTo = "-1"  # 18 Product_ShippedTo
    image = "-1"   # 19 Product_Image
    vendor_image = "-1" # 20 Vendor_Image

    # 0 *Vendor_Name
    # The vendor link sits in row 3 or row 4 of the info table depending on
    # the page layout, so fall back to row 4 when row 3 has no anchor.
    # NOTE: the original used set literals {'class', 'vtable'}; the dict form
    # below is the intended attribute filter (matches L101's existing style).
    try:
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[3].find('a').text
        vendor = cleanString(temp2.strip())
    except (AttributeError, IndexError):
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[4].find('a').text
        vendor = cleanString(temp2.strip())

    # product name
    name = soup.find('div', {'class': 'title-h2'}).text
    name = cleanString(name.strip())

    # product description (flatten newlines into spaces)
    temp = soup.find('pre', {'class': 'description'}).text
    temp = temp.replace('\n', ' ')
    describe = cleanString(temp.strip())

    # product category — row index shifts like the vendor row, so verify the
    # header cell actually reads "Category" before taking the value.
    try:
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[4].find('th').text
        temp2 = cleanString(temp2)
        if (temp2 == "Category"):
            temp2 = temp[4].find('a').text
            category = cleanString(temp2.strip())
    except (AttributeError, IndexError):
        temp = soup.find('table', {'class': 'vtable'})
        temp = temp.findAll('tr')
        temp2 = temp[5].find('th').text
        # BUG FIX: original passed the bound method (temp2.strip) instead of
        # calling it, so the "Category" comparison could never match here.
        temp2 = cleanString(temp2.strip())
        if (temp2 == "Category"):
            temp2 = temp[5].find('a').text
            category = cleanString(temp2.strip())

    # usd price (strip the trailing currency marker)
    temp = soup.find('table', {'class': 'vtable'})
    temp = temp.findAll('tr')
    temp2 = temp[1].find('td').text
    temp2 = temp2.replace(' USD', '')
    USD = cleanString(temp2)

    # 15 Product_QuantitySold — the "Sold" row may be at index 5 or 6
    temp = soup.find('table', {'class': 'vtable'})
    temp = temp.findAll('tr')
    temp2 = temp[5].find('th').text
    temp2 = cleanString(temp2)
    temp3 = temp[6].find('th').text
    temp3 = cleanString(temp3)
    if (temp2 == "Sold"):
        temp2 = temp[5].find('td').text
        sold = cleanString(temp2.strip())
    elif (temp3 == "Sold"):
        temp2 = temp[6].find('td').text
        sold = cleanString(temp2.strip())

    # Finding Product Image — keep only the base64 payload of a data: URI
    image = soup.find('td', {"class": "vtop"}).find('img')
    if image is not None:
        image = image.get('src').split('base64,')[-1]
    else:
        image = '-1'

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row


#parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs
#stores info it needs in different lists, these lists are returned after being organized
#@param: soup object looking at html page of listing page
#return: 'row' that contains a variety of lists that each hold info on the listing page
def darkmatter_listing_parser(soup):
    """Parse a DarkMatter listing page into per-product field lists.

    @param soup: BeautifulSoup object of the listing page HTML
    @return: result of organizeProducts(...) over the scraped columns;
             per-product fields that are not scraped are filled with "-1"
             so every list stays the same length
    """

    # Fields to be parsed

    nm = 0                                    # *Total_Products (Should be Integer)
    mktName = "DarkMatter"                    # 0 *Marketplace_Name
    vendor = []                               # 1 *Vendor y
    rating = []                              # 2 Vendor_Rating
    success = []                              # 3 Vendor_Successful_Transactions
    name = []                                 # 4 *Product_Name y
    CVE = []                                  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                                   # 6 Product_MS_Classification (Microsoft Security)
    category = []                             # 7 Product_Category y
    describe = []                             # 8 Product_Description
    views = []                                # 9 Product_Number_Of_Views
    reviews = []                              # 10 Product_Number_Of_Reviews
    rating_item = []                          # 11 Product_Rating
    addDate = []                              # 12 Product_AddDate
    BTC = []                                  # 13 Product_BTC_SellingPrice
    USD = []                                  # 14 Product_USD_SellingPrice y
    EURO = []                                 # 15 Product_EURO_SellingPrice
    sold = []                                 # 16 Product_QuantitySold
    qLeft =[]                                 # 17 Product_QuantityLeft
    shipFrom = []                             # 18 Product_ShippedFrom
    shipTo = []                               # 19 Product_ShippedTo
    image = []                                # 20 Product_Image
    image_vendor = []                         # 21 Vendor_Image
    href = []                                 # 22 Product_Links

    # Four parallel collections of page fragments, one entry (or two, for the
    # "right"/"images" cells) per listed product.
    names = soup.find('div', {"class": "content"}).findAll('td', {"class": "lefted", "colspan": "3"})
    left = soup.find('div', {"class": "content"}).findAll('table', {"class": "vtable"})
    right = soup.find('div', {"class": "content"}).findAll('td', {"class": "vtop centered"})
    images = soup.find('div', {"class": "content"}).findAll('td', {"class": "vcentered"})

    # vtop centered
    count = 0
    # Populating the Number of Products
    nm = len(names)

    for a in names:
        # product name — strip the leading quantity prefix ("N pcs x ", etc.)
        temp = a.find('a').text
        if ("pcs x " in temp):
            index = temp.index("pcs x ")
            result = temp[index + len("pcs x "):]
            name.append(cleanString(result))
        elif("pks x " in temp):
            index = temp.index("pks x ")
            result = temp[index + len("pks x "):]
            name.append(cleanString(result))
        elif ("job x " in temp):
            index = temp.index("job x ")
            result = temp[index + len("job x "):]
            name.append(cleanString(result))
        else:
            # BUG FIX: titles without a quantity prefix were silently skipped,
            # leaving `name` shorter than every other per-product list and
            # misaligning all columns. Keep the full title instead.
            name.append(cleanString(temp.strip()))

        CVE.append("-1")
        MS.append("-1")

        temp2 = left[count].findAll('tr')

        length_2 = len(temp2) - 1

        # category
        temp = temp2[1].find('td').text
        category.append(cleanString(temp.strip()))

        describe.append("-1")
        #escrow.append("-1")
        views.append("-1")
        reviews.append("-1")
        addDate.append("-1")
        #lastSeen.append("-1")
        BTC.append("-1")
        image_vendor.append("-1")

        # usd — the price cell appears at every second "right" entry
        temp3 = right[count*2].find('span').text
        temp = temp3.replace(' USD', '')
        USD.append(cleanString(temp))

        EURO.append("-1")

        # 14 Product_QuantitySold — last table row is "Sold:" when present
        temp3 = temp2[length_2].find('th').text
        temp3 = cleanString(temp3)
        if (temp3 == "Sold:"):
            temp = temp2[length_2].find('td').text
            sold.append(cleanString(temp.strip()))
        else:
            sold.append("-1")

        qLeft.append("-1")
        shipFrom.append("-1")

        # ship to — same last row may instead be "Ship To:"
        temp3 = temp2[length_2].find('th').text
        temp3 = cleanString(temp3)
        if (temp3 == "Ship To:"):
            temp = temp2[length_2].find('td').text
            shipTo.append(cleanString(temp.strip()))
        else:
            shipTo.append("-1")

        # vendor
        temp = temp2[0].find('a').text
        vendor.append(cleanString(temp.strip()))

        # add product rating (stars)
        rating.append("-1")
        success.append("-1")

        temp = a.find('a').get('href')
        href.append(temp)

        # Finding Product Image — keep only the base64 payload.
        # BUG FIX: the original rebound `image` to a string each iteration,
        # so organizeProducts received only the LAST product's image instead
        # of a per-product list; append like every other column.
        temp = images[count*2].find('img').get('src')
        image.append(temp.split('base64,')[-1])

        count += 1

        rating_item.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image, image_vendor)


#called by the crawler to get description links on a listing page
#@param: beautifulsoup object that is using the correct html page (listing page)
#return: list of description links from a listing page
def darkmatter_links_parser(soup):
    """Collect the description-page links present on a listing page.

    @param soup: BeautifulSoup object of the listing page HTML
    @return: list of href strings, one per product cell
    """

    # Every product occupies a 3-column "lefted" cell inside the content div;
    # the first anchor in each cell points at its description page.
    content = soup.find('div', {"class": "content"})
    cells = content.findAll('td', {"class": "lefted", 'colspan': '3'})

    return [cell.find('a', href=True)['href'] for cell in cells]