__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

# Here, we are importing datetime explicitly for the strptime calls below (the wildcard
# utilities import may already provide it, but being explicit keeps this module self-contained)
from datetime import datetime


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
# It takes the soup object of a description page's html and parses it for the info it needs,
# storing that info in different lists that are returned together after being organized
#@param: soup object looking at the html page of a description page
#return: 'row' that contains a variety of lists that each hold info on the description page
def incogsnoo_description_parser(soup):

    # Fields to be parsed

    topic = "-1"           # 0 topic name ***$
    user = []              # 1 all users of each post ***$ author
    status = []            # 2 all users' authority in each post such as (adm, member, dangerous)
    reputation = []        # 3 all users' karma in each post (usually found as a number) ??? ups
    interest = []          # 4 all users' interests in each post
    sign = []              # 5 all users' signatures in each post (usually a standard message after the content of the post)
    post = []              # 6 all messages of each post
    feedback = []          # 7 all feedbacks of each user (this was found in just one Forum and with a number format)
    addDate = []           # 8 all dates of each post ***$ created
    image_user = []        # 9 all user avatars of each post
    image_post = []        # 10 all first images of each post

    # Finding the topic (should be just one coming from the Listing Page)
    topic = soup.find("div", {"class": "title"}).find("h2").text
    topic = topic.replace('"', '')
    topic = cleanString(topic.strip())

    # the first post's html is separated from all subsequent comments/replies/posts to the first post
    # so parse the first post by itself first

    # Finding body of first post
    post_text = soup.find("div", {"class": "md"})
    if post_text:
        post_text = post_text.text.strip()
        post.append(cleanString(post_text))
    else: # some posts are just links to other sites/articles/videos and have no text of their own
        post_link = soup.find("div", {"class": "title"}).find("a").get("href")
        post_link = cleanLink(post_link)
        post.append(post_link)

    # User
    p_tag = soup.find("p", {"class": "submitted"})
    author = p_tag.find("a")
    if author:
        author = author.text.strip()
    elif "[deleted]" in p_tag.text:
        author = "deleted"
    else:
        author = "-1"
    user.append(cleanString(author))

    # Finding the status of the author
    status.append("-1")

    # Finding the reputation of the user
    reputation.append("-1")

    # Finding the interest of the author
    interest.append("-1")

    # Finding signature
    sign.append("-1")

    # Finding feedback
    upvote = soup.find("div", {"class": "score"}).find("span")
    if upvote:
        upvote = upvote.text.strip()
    else:
        upvote = "-1"
    feedback.append(cleanString(upvote))

    # Finding the date of the post - e.g. "Fri, 18 Dec 2023 05:49:20 GMT"
    dt = soup.find("p", {"class": "submitted"}).find("span")["title"]
    # Convert to datetime object - e.g. 2023-12-18 05:49:20
    date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')
    # sdate = date_time_obj.strftime('%m %d %Y')
    # stime = date_time_obj.strftime('%I:%M %p')

    # date = convertDate(sdate, "english", datetime.now()) + " " + stime
    # e.g. "12/18/2023 05:49 AM"
    addDate.append(date_time_obj)

    image_user.append("-1")
    image_post.append("-1")

    posts = soup.find("div", {"class": "comments"}).findAll("details")

    # For each message (post), get all the fields we are interested in:

    for ipost in posts:

        # Finding user
        p_tag = ipost.find("p", {"class": "author"})
        author = p_tag.find("a")
        if author:
            author = author.text.strip()
        elif "[deleted]" in p_tag.text:
            author = "deleted"
        else:
            author = "-1"
        user.append(cleanString(author))

        # Finding the status of the author
        status.append("-1")

        # Finding the reputation of the user
        reputation.append("-1")

        # Finding the interest of the author
        interest.append("-1")

        # Finding signature
        sign.append("-1")

        # Finding the post
        comment = ipost.find("div", {"class": "md"})
        if comment:
            comment = comment.text.strip()
        else:
            comment = "-1"
        post.append(cleanString(comment))

        # Finding feedback
        upvote = ipost.find("p", {"class": "ups"})
        if upvote:
            upvote = upvote.text.strip().split()[0]
        else:
            upvote = "-1"
        feedback.append(cleanString(upvote))

        # Finding the date of the post - e.g. "Fri, 18 Dec 2023 05:49:20 GMT"
        dt = ipost.find("p", {"class": "created"})["title"]
        # Convert to datetime object - e.g. 2023-12-18 05:49:20
        date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')
        # sdate = date_time_obj.strftime('%m %d %Y')
        # stime = date_time_obj.strftime('%I:%M %p')

        # date = convertDate(sdate, "english", datetime.now()) + " " + stime
        # e.g. "12/18/2023 05:49 AM"
        addDate.append(date_time_obj)

        image_user.append("-1")
        image_post.append("-1")

    # Populate the final variable (this should be a tuple with all the fields scraped)

    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results

    return row
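
# A hedged usage sketch (illustrative only): the crawler normally hands this function the soup
# object of a page it fetched itself; the file name below is a hypothetical saved copy of a
# description page, not something the pipeline produces.
#
#     with open("incogsnoo_description_sample.html", "r", encoding="utf-8") as f:
#         sample_soup = BeautifulSoup(f.read(), "html.parser")
#     sample_row = incogsnoo_description_parser(sample_soup)
#     # sample_row[0] is the topic string; the remaining entries are parallel lists with one
#     # element per post, e.g. sample_row[1][0] is the first post's author and
#     # sample_row[8][0] its datetime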


# This is the method to parse the Listing Pages (one page listing many topics)
# It takes the soup object of a listing page's html and parses it for the info it needs,
# storing that info in different lists that are returned together after being organized
#@param: soup object looking at the html page of a listing page
#return: 'row' that contains a variety of lists that each hold info on the listing page
def incogsnoo_listing_parser(soup):

    nm = 0              # *this variable should receive the number of topics
    forum = "Incogsnoo"   # 0 *forum name
    board = "-1"        # 1 *board name (the previous level of the topic in the Forum categorization tree.
                        # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []         # 2 *all authors of each topic
    topic = []          # 3 *all topics
    views = []          # 4 number of views of each topic
    posts = []          # 5 number of posts of each topic
    href = []           # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                        # Listing and Description pages)
    addDate = []        # 7 when the topic was created (difficult to find)
    image_author = []  # 8 all author avatars used in each topic

    # Finding the board (should be just one)
    board = soup.find("a", {"class": "subreddit"}).find("h2")
    board = cleanString(board.text.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find("div", {"id": "links", "class": "sr"}).findAll("div", {"class": "link"})
    # drop the last matched div, which is not an actual topic entry
    itopics.pop()
    # Counting how many topics we have found so far

    nm = len(itopics)

    index = 0
    for itopic in itopics:

        # Finding the author of the topic
        p_tag = itopic.find("p", {"class": "submitted"})
        user = p_tag.find("a")
        if user:
            user = user.text.strip()
        elif "[deleted]" in p_tag.text:
            user = "deleted"
        else:
            user = "-1"
        author.append(cleanString(user))

        # Adding the topic to the topic list
        topic_title = itopic.find("div", {"class": "title"}).find("h2").text
        topic.append(cleanString(topic_title))

        # Finding the number of Views
        views.append("-1")

        # Finding the number of posts
        comments = itopic.find("a", {"class": "comments"}).text
        number_comments = comments.split()[0]
        posts.append(cleanString(number_comments))

        # Adding the url to the list of urls
        link = itopic.find("a", {"class": "comments"}).get("href")
        href.append(link)

        # Finding dates
        p_tag = itopic.find("p", {"class": "submitted"})
        dt = p_tag.find("span")["title"]
        date_time_obj = datetime.strptime(dt,'%a, %d %b %Y %H:%M:%S %Z')
        # sdate = date_time_obj.strftime('%m %d %Y')
        # stime = date_time_obj.strftime('%I:%M %p')
        # date = convertDate(sdate, "english", datetime.now()) + " " + stime
        # e.g. "12/18/2023 05:49 AM"
        addDate.append(date_time_obj)

        image_author.append("-1")

        index += 1

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate, image_author)
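
# Illustrative only: the listing parser passes its parallel lists to organizeTopics (imported
# from the utilities module), so callers receive whatever structure that helper builds.
# The file name below is a hypothetical saved copy of a listing page.
#
#     with open("incogsnoo_listing_sample.html", "r", encoding="utf-8") as f:
#         listing_soup = BeautifulSoup(f.read(), "html.parser")
#     organized = incogsnoo_listing_parser(listing_soup)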


#called by the crawler to get description links on a listing page
#@param: beautifulsoup object that is using the correct html page (listing page)
#return: list of description links from a listing page
def incogsnoo_links_parser(soup):

    # Returning all links that should be visited by the Crawler

    href = []

    listing_parent = soup.find("div", {"id": "links", "class": "sr"})
    listing = listing_parent.findAll("div", {"class": "entry"})

    for entry in listing:

        parent_div = entry.find("div", {"class": "meta"}).find("div", {"class": "links"})
        a_tag = parent_div.find("a", {"class": "comments"})
        if a_tag:
            href.append(a_tag.get("href"))

    return href
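

# Minimal smoke test, assuming a locally saved listing page; the file name is a placeholder,
# since the real crawler builds its soup objects from pages it fetched itself.
if __name__ == "__main__":
    with open("incogsnoo_listing_sample.html", "r", encoding="utf-8") as f:
        sample_soup = BeautifulSoup(f.read(), "html.parser")

    # every href returned here points at a description page the crawler would visit next
    for description_link in incogsnoo_links_parser(sample_soup):
        print(description_link)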