__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

import re

#parses description pages: takes the soup object of a description page's HTML and
#extracts the product and vendor information it needs
#@param: soup object of the html page of a description page
#return: 'row', a tuple holding every field scraped from the description page
def AnonMarket_description_parser(soup):

    # Fields to be parsed
    vendor = "-1"  # 0 *Vendor_Name
    success = "-1"  # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"  # 2 Vendor_Rating
    name = "-1"  # 3 *Product_Name
    describe = "-1"  # 4 Product_Description
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about that much
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security) dont worry about that much
    category = "-1"  # 7 Product_Category
    views = "-1"  # 8 Product_Number_Of_Views
    reviews = "-1"  # 9 Product_Number_Of_Reviews
    rating_item = "-1"  # 10 Product_Rating
    addDate = "-1"  # 11 Product_AddedDate
    BTC = "-1"  # 12 Product_BTC_SellingPrice
    USD = "-1"  # 13 Product_USD_SellingPrice
    EURO = "-1"  # 14 Product_EURO_SellingPrice
    sold = "-1"  # 15 Product_QuantitySold
    left = "-1"  # 16 Product_QuantityLeft
    shipFrom = "-1"  # 17 Product_ShippedFrom
    shipTo = "-1"  # 18 Product_ShippedTo

    # Product name is taken from the page heading
    name_element = soup.find("div", {"class": "heading"})
    if name_element is not None:
        name = cleanString(name_element.text.strip())

    description_div = soup.find("div", {"class": "tab1"})
    if description_div is not None:
        describe = cleanString(description_div.text.strip())

    info_div = soup.find('div', {'class': 'information'})
    table = info_div.find('table') if info_div else None

    if table:
        # Find all table rows
        rows = table.find_all('tr')

        # Parse each row into a key/value pair (label in the first <td>, value in the third)
        data = {}
        for row in rows:
            columns = row.find_all('td')
            if len(columns) == 3:
                key = columns[0].text.strip()
                value = columns[2].text.strip()
                data[key] = value

        # Extract specific fields from the dictionary and assign them to individual variables
        vendor = data.get('Vendor', '-1')
        shipFrom = data.get('Location', '-1')
        shipTo = data.get('Ships to', '-1')
        category = data.get('Category', '-1')
        # The price cell may carry a currency label after the amount; keep only the first token
        price_str = data.get('Price', '-1')
        USD = price_str.split()[0] if price_str.strip() else '-1'
        left = data.get('Stock', '-1')

    # Populating the final variable (a tuple holding every scraped field, in the order listed above)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)


    # Sending the results
    return row
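

# Usage sketch (illustrative only, not part of the original pipeline): load a
# description page that the crawler has already saved to disk and run the parser
# on it. The file path, encoding, and "html.parser" backend are assumptions.
def example_parse_description_page(html_path):
    # Read the saved HTML and build the soup object the parser expects
    with open(html_path, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    # Returns the 19-field tuple documented above (vendor, rating_vendor, success, ...)
    return AnonMarket_description_parser(soup)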


#parses listing pages: takes the soup object of a listing page's HTML and extracts
#the info it needs for each product into parallel lists
#@param: soup object of the html page of a listing page
#return: the organized product rows built from those lists by organizeProducts
def AnonMarket_listing_parser(soup):

    # Fields to be parsed
    nm = 0  # *Total_Products (Should be Integer)
    mktName = "AnonMarket"  # 0 *Marketplace_Name
    vendor = []  # 1 *Vendor y
    rating_vendor = []  # 2 Vendor_Rating
    success = []  # 3 Vendor_Successful_Transactions
    name = []  # 4 *Product_Name y
    CVE = []  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []  # 6 Product_MS_Classification (Microsoft Security)
    category = []  # 7 Product_Category y
    describe = []  # 8 Product_Description
    views = []  # 9 Product_Number_Of_Views
    reviews = []  # 10 Product_Number_Of_Reviews
    rating_item = []  # 11 Product_Rating
    addDate = []  # 12 Product_AddDate
    BTC = []  # 13 Product_BTC_SellingPrice
    USD = []  # 14 Product_USD_SellingPrice y
    EURO = []  # 15 Product_EURO_SellingPrice
    sold = []  # 16 Product_QuantitySold
    qLeft = []  # 17 Product_QuantityLeft
    shipFrom = []  # 18 Product_ShippedFrom
    shipTo = []  # 19 Product_ShippedTo
    href = []  # 20 Product_Links
    base_url = "http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion"

    products_list = soup.find_all('div', {'class': 'item'})
    for product in products_list:
        try:
            name_of_product = product.find("div", {"class": "title"}).text.strip()
            name.append(name_of_product)

            name_of_vendor = product.find("a", {'class': 'seller'}).text.strip()
            vendor.append(name_of_vendor)

            # The page heading is used as the category for every product on this listing page
            cat = soup.find("div", {'class': 'heading'}).text.strip()
            category.append(cat)

            product_link_element = product.find("div", {"class": "title"}).find_parent('a')
            if product_link_element:
                link = product_link_element['href']
                if "/product/" in link and "/user/" not in link:
                    full_link = base_url + link
                    href.append(full_link)
                else:
                    href.append("-1")
            else:
                href.append("-1")

            # Append '-1' for fields not available on the listing page, including
            # rating_item and USD, so every per-product list stays the same length
            rating_vendor.append("-1")
            success.append("-1")
            CVE.append("-1")
            MS.append("-1")
            describe.append("-1")
            views.append("-1")
            reviews.append("-1")
            rating_item.append("-1")
            addDate.append("-1")
            BTC.append("-1")
            USD.append("-1")
            EURO.append("-1")
            sold.append("-1")
            qLeft.append("-1")
            shipFrom.append("-1")
            shipTo.append("-1")

            nm += 1

        except AttributeError as e:
            print("Skipping a product that is missing an expected element:", e)
            continue

    # Organize the parallel per-product lists into the final product rows
    return organizeProducts(
        marketplace = "AnonMarket",
        nm = nm,
        vendor = vendor,
        rating_vendor = rating_vendor,
        success_vendor = success,
        nombre = name,
        CVE = CVE,
        MS = MS,
        category = category,
        describe = describe,
        views = views,
        reviews = reviews,
        rating_item = rating_item,
        addDate = addDate,
        BTC = BTC,
        USD = USD,
        EURO = EURO,
        sold = sold,
        qLeft = qLeft,
        shipFrom = shipFrom,
        shipTo = shipTo,
        href = href
    )
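

# Usage sketch (illustrative only, not part of the original crawler): parse a
# listing page that has already been saved to disk. The file path, encoding, and
# "html.parser" backend are assumptions; the exact shape of the return value is
# defined by organizeProducts in MarketPlaces.Utilities.utilities.
def example_parse_listing_page(html_path):
    # Read the saved HTML and build the soup object the parser expects
    with open(html_path, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    return AnonMarket_listing_parser(soup)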


#called by the crawler to extract description-page links from a listing page
#@param: soup object of the html page of a listing page
#return: list of description (product) page links found on the listing page
def AnonMarket_links_parser(soup):
    # Base URL to prepend to each product link
    base_url = "http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion"

    # Returning all links that should be visited by the Crawler
    href = []

    # Product links live inside the 'items' container on a listing page
    items_div = soup.find('div', {'class': 'items'})
    listing = items_div.find_all('a', href=True) if items_div else []

    for a in listing:
        link = a['href']
        # Only keep links that point to product description pages
        if "/product/" in link:
            # Prepend the base URL to the scraped (relative) link
            href.append(base_url + link)

    return href
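

# Usage sketch (illustrative only): deduplicate the links returned by
# AnonMarket_links_parser while preserving their order before handing them to a
# crawler. This is an assumption about how a caller might use the result; the
# real crawler logic lives elsewhere in the project.
def example_collect_product_links(soup):
    seen = set()
    unique_links = []
    for link in AnonMarket_links_parser(soup):
        if link not in seen:
            seen.add(link)
            unique_links.append(link)
    return unique_links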