__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import datetime
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)

def abyssForums_description_parser(soup):

    # Fields to be parsed

    topic = "-1"           # 0 topic name
    user = []              # 1 all users of each post
    addDate = []           # 2 all dates of each post
    feedback = []          # 3 all feedback of each vendor (found in just one forum, as a number)
    status = []            # 4 each user's authority in each post (e.g. admin, member, dangerous)
    reputation = []        # 5 each user's karma in each post (usually found as a number)
    sign = []              # 6 each user's signature in each post (usually a standard message after the content of the post)
    post = []              # 7 all messages of each post
    interest = []          # 8 each user's interests in each post
    image_user = []        # 9 all user avatars of each post
    image_post = []        # 10 all first images of each post

    # Finding the topic (should be just one coming from the Listing Page)

    li = soup.find("div", {"class": "page-body"}).find("h2", {"class": "topic-title"})
    topic = li.text.replace(",", "").replace("\n", "")
    topic = cleanString(topic.strip())

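    # Each message sits in a <div> whose class starts with "post has-profile"
    # (phpBB-style markup), so a regex on the class attribute matches every post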
    regex = re.compile('post has-profile.*')
    posts = soup.find_all('div', {"class": regex})

    # For each message (post), get all the fields we are interested in:

    for ipost in posts:

        # Finding the author (user) of the post
        author = ipost.find('a', {"class": "username"}).text
        user.append(cleanString(author))  # Remember to clean the problematic characters

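        # Fields not exposed per post on this forum are filled with the default "-1"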
        status.append("-1")
        reputation.append("-1")
        interest.append("-1")
        sign.append("-1")
        feedback.append("-1")
        image_post.append("-1")

        # Finding the user's avatar (embedded inline as a base64 string)
        img = ipost.find('dl', {"class": "postprofile"}).find('img')
        if img is not None:
            img = img.get('src').split('base64,')[-1]
        else:
            img = "-1"
        image_user.append(img)

        # Finding the date of the post from the <time> tag's ISO datetime attribute
        date_time_obj = ipost.find('time').attrs
        date_str = date_time_obj['datetime'][0:10]
        time_str = date_time_obj['datetime'][11:19]
        addDate.append(datetime.strptime(date_str + " " + time_str, '%Y-%m-%d %H:%M:%S'))

        # Finding the post

        inner = ipost.find('div', {"class": "content"})
        inner = inner.text.strip()
        post.append(cleanString(inner))

    # Populate the final variable (a tuple with all the fields scraped)

    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results

    return row

# This is the method to parse the Listing Pages (one page with many topics)
def abyssForums_listing_parser(soup: BeautifulSoup):

    nm = 0                         # this variable should receive the number of topics
    forum = "AbyssForum"           # 0 *forum name
    board = "-1"                   # 1 board name (the previous level of the topic in the Forum categorization tree.
                                   # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []                    # 2 all authors of each topic
    topic = []                     # 3 all topics
    views = []                     # 4 number of views of each topic
    posts = []                     # 5 number of posts of each topic
    href = []                      # 6 all cleaned urls (used to merge the Listing and Description pages)
    addDate = []                   # 7 when the topic was created (difficult to find)
    image_author = []              # 8 all author avatars used in each topic

    # Finding the board

    board = soup.find("h2", {"class": "forum-title"}).text
    board = cleanString(board.strip())

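    # Each topic row on the listing page carries a class like "row bg1" or
    # "row bg2" (alternating backgrounds), hence the regex below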
    type_of_posts = soup.find_all("li", {"class": re.compile(r"row bg\d")})
    for topic_row in type_of_posts:
        title_of_post = topic_row.find("a", {"class": "topictitle"}).text
        title_of_post = cleanString(title_of_post)
        topic.append(title_of_post)
        user = topic_row.find("div", {"class": "topic-poster responsive-hide left-box"}).find("a", {"class": "username"}).text
        author.append(user)
        num_post = topic_row.find("dd", {"class": "posts"}).text.replace("Replies", "").strip()
        posts.append(num_post)
        num_view = topic_row.find("dd", {"class": "views"}).text.replace("Views", "").strip()
        views.append(num_view)
        # if int(num_post) != 0: join the last user who posted with the author?
        #     reply = topic_row.find("dd", {"class": "lastpost"}).find("a", {"class": "username"}).text
        #     user.append(reply)

        # Finding the topic creation date from the <time> tag's ISO datetime attribute
        date_time_obj = topic_row.find('time').attrs
        date_str = date_time_obj['datetime'][0:10]
        time_str = date_time_obj['datetime'][11:19]
        date_added = datetime.strptime(date_str + " " + time_str, '%Y-%m-%d %H:%M:%S')

        addDate.append(date_added)

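        # Relative URL of the topic, used later to fetch its Description Page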
        listing_href = topic_row.find("a", {"class": "topictitle"}).get("href")
        href.append(listing_href)

        image_author.append("-1")

    nm = len(topic)

    return organizeTopics(
        forum=forum,
        nm=nm,
        board=board,
        author=author,
        topic=topic,
        views=views,
        posts=posts,
        href=href,
        addDate=addDate,
        image_author=image_author
    )


def abyssForum_links_parser(soup):

    # Returning all links that should be visited by the Crawler

    href = []
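    # Note: phpBB also marks unseen topics as "row-item topic_unread",
    # which this selector would skip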
    listing = soup.find_all('dl', {"class": "row-item topic_read"})

    for a in listing:
        link = a.find('div', {"class": "list-inner"}).find('a').get('href')
        href.append(link)

    return href
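

# A minimal smoke-test sketch (not part of the crawler pipeline). The file
# names below are hypothetical stand-ins for pages the crawler has already
# saved to disk.
if __name__ == "__main__":
    with open("abyssforum_listing.html", encoding="utf-8") as f:
        listing_soup = BeautifulSoup(f.read(), "html.parser")
    print(abyssForum_links_parser(listing_soup))  # topic URLs the crawler should visit

    with open("abyssforum_topic.html", encoding="utf-8") as f:
        topic_soup = BeautifulSoup(f.read(), "html.parser")
    row = abyssForums_description_parser(topic_soup)
    print(row[0], row[1])  # topic title and the users who posted in it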