__author__ = 'Helium'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import datetime, date, timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)

def cryptBB_description_parser(soup):

    # Fields to be parsed

    topic = "-1"           # topic name
    user = []              # all users of each post
    addDate = []           # all dated of each post
    feedback = []          # all feedbacks of each vendor (this was found in just one Forum and with a number format)
    status = []            # all user's authority in each post such as (adm, member, dangerous)
    reputation = []        # all users's karma in each post (usually found as a number)
    sign = []              # all user's signature in each post (usually a standard message after the content of the post)
    post = []              # all messages of each post
    interest = []          # all user's interest in each post

    # Finding the topic (should be just one coming from the Listing Page)

    li = soup.find("td", {"class": "thead"}).find('strong')
    topic = li.text
    topic = re.sub(r"\[\w*\]", '', topic)

    topic = topic.replace(",","")
    topic = topic.replace("\n","")
    topic = cleanString(topic.strip())
    # print(topic)

    # Finding the repeated tag that corresponds to the listing of posts

    # posts = soup.find("form", {"name": "quickModForm"}).findAll('div', {"class": "windowbg"}) + \
    #         soup.find("form", {"name": "quickModForm"}).findAll('div', {"class": "windowbg2"})

    try:
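        # Each post is a div.post node under td#posts_container in the main table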
        posts = soup.find('table', {"class": "tborder tfixed clear"}).find('td', {"id": "posts_container"}).find_all(
            'div', {"class": "post"})
        # print(len(posts))

        # For each message (post), get all the fields we are interested in:

        for ipost in posts:

            # Finding the first level of the HTML page

            # post_wrapper = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "poster"})
            post_wrapper = ipost.find('span', {"class": "largetext"})
            # Finding the author (user) of the post

            # author = post_wrapper.find('h4')
            author = post_wrapper.text.strip()
            # print("author " + author)
            user.append(cleanString(author))  # Remember to clean the problematic characters

            # Finding the status of the author

            smalltext = ipost.find('div', {"class": "post_author"})
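            # The author block (div.post_author) also holds the rank, interests, and statistics used below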

            # Handling deleted posts: fill every field with a placeholder and skip to the next post
            if ipost.find('div', {"class": "deleted_post_author"}):
                status.append(-1)
                interest.append(-1)
                reputation.append(-1)
                addDate.append(-1)
                post.append("THIS POST HAS BEEN REMOVED!")
                sign.append(-1)
                feedback.append(-1)
                continue

            # CryptBB does have membergroup and postgroup

            membergroup = smalltext.find('div', {"class": "profile-rank"})
            postgroup = smalltext.find('div', {"class": "postgroup"})
            if membergroup is not None:
                membergroup = membergroup.text.strip()
                if postgroup is not None:
                    membergroup = membergroup + " - " + postgroup.text.strip()
            elif postgroup is not None:
                membergroup = postgroup.text.strip()
            else:
                membergroup = "-1"

            status.append(cleanString(membergroup))
            # print("status " + cleanString(membergroup))
            # Finding the interest of the author
            # CryptBB does not have blurb
            blurb = smalltext.find('li', {"class": "blurb"})
            if blurb is not None:
                blurb = blurb.text.strip()
            else:
                blurb = "-1"
            interest.append(cleanString(blurb))

            # Finding the reputation of the user
            # CryptBB does have reputation
            author_stats = smalltext.find('div', {"class": "author_statistics"})
            karma = author_stats.find('strong')
            if karma is not None:
                karma = karma.text
                karma = karma.replace("Community Rating: ", "")
                karma = karma.replace("Karma: ", "")
                karma = karma.strip()
            else:
                karma = "-1"
            reputation.append(cleanString(karma))
            # print("karma " + cleanString(karma))
            # Getting here another good tag to find the post date, post content and user's signature

            postarea = ipost.find('div', {"class": "post_content"})

            dt = postarea.find('span', {"class": "post_date"}).text
            dt = dt.strip()
            day = date.today()
            if "Yesterday" in dt:
                # Posts from yesterday appear as "Yesterday, hh:mm AM/PM"
                yesterday = (day - timedelta(days=1)).strftime('%m-%d-%Y')
                stime = dt.replace('Yesterday,', '').strip()
                date_time_obj = datetime.strptime(yesterday + ', ' + stime, '%m-%d-%Y, %I:%M %p')
            elif "hours ago" in dt:
                # Recent posts appear as "X hours ago"; the absolute date sits in the inner span's title attribute
                date_time_obj = postarea.find('span', {"class": "post_date"}).find('span')['title']
                date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
            else:
                # Older posts carry the full date directly, e.g. "08-21-2021, 05:30 PM"
                date_time_obj = datetime.strptime(dt, '%m-%d-%Y, %I:%M %p')

            addDate.append(date_time_obj)
            # print("date " + str(date_time_obj))
            # Finding the date of the post
            # date_time_obj = datetime.strptime(dt, '%a %b %d, %Y %I:%M %p')
            # smalltext = postarea.find('div', {"class": "flow_hidden"}).find('div', {"class": "keyinfo"})\
            #     .find('div', {"class": "smalltext"})
            # sdatetime = smalltext.text
            # sdatetime = sdatetime.replace(u"\xab","") # Removing unnecessary characters
            # sdatetime = sdatetime.replace(u"\xbb","") # Removing unnecessary characters
            # sdatetime = sdatetime.split("on: ")       # Removing unnecessary characters
            # sdatetime = sdatetime[1].strip()
            # stime = sdatetime[:-12:-1]                # Finding the time of the post
            # stime = stime[::-1]
            # sdate = sdatetime.replace(stime,"")       # Finding the date of the post
            # sdate = sdate.replace(",","")
            # sdate = sdate.strip()

            # Convert the date of the post, which can appear as "12 February 2016", "today", or "yesterday".
            # We need a date format here such as "mm/dd/yyyy"

            # addDate.append(convertDate(sdate,"english", crawlerDate) + " " + stime)

            # Finding the post

            inner = postarea.find('div', {"class": "post_body scaleimages"})
            inner = inner.text.strip()
            # print(inner)
            post.append(cleanString(inner))

            # Finding the user's signature

            # signature = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "moderatorbar"}).find('div', {"class": "signature"})
            signature = ipost.find('div', {"class": "signature scaleimages"})
            if signature is not None:
                signature = signature.text.strip()
                # print(signature)
            else:
                signature = "-1"
            sign.append(cleanString(signature))

            # As no information about the users' feedback was found, just assign "-1" to the variable

            feedback.append("-1")
    except Exception:
        if soup.find('td', {"class": "trow1"}).text == " You do not have permission to access this page. ":
            user.append("-1")
            status.append(-1)
            interest.append(-1)
            reputation.append(-1)
            addDate.append(-1)
            post.append("NO ACCESS TO THIS PAGE!")
            sign.append(-1)
            feedback.append(-1)


    # Populate the final variable (this should be a tuple with all the fields scraped)

    row = (topic, post, user, addDate, feedback, status, reputation, sign, interest)

    # Sending the results

    return row
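
# A minimal usage sketch (hypothetical, not part of the crawler pipeline):
# parse a locally saved Description Page with the function above. The file
# name "cryptbb_topic.html" is an assumption for illustration only.
#
# with open("cryptbb_topic.html", encoding="utf-8") as f:
#     row = cryptBB_description_parser(BeautifulSoup(f.read(), "html.parser"))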

# This is the method to parse the Listing Pages (one page with many posts)
def AbyssForums_listing_parser(soup: BeautifulSoup):
    board = "-1"  # board name (the previous level of the topic in the Forum categorization tree.
    # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)

    nm = 0  # this variable should receive the number of topics
    topic = []  # all topics
    user = []  # all users of each topic
    post = []  # number of posts of each topic
    view = []  # number of views of each topic
    addDate = []  # when the topic was created (difficult to find)
    href = []  # this variable should receive all cleaned urls (we will use this to do the merge between
    # Listing and Description pages)
    # Finding the board

    board = soup.find("title").text
    board = cleanString(board.strip())

    # phpBB-style listings alternate row classes ("row bg1", "row bg2"); bg\d matches both
    type_of_posts = soup.find_all("li", {"class": re.compile(r"row bg\d")})
    for literature in type_of_posts:
        title_of_post = literature.find("a", {"class": "topictitle"}).text
        topic.append(title_of_post)
        author = literature.find("div", {"class": "topic-poster responsive-hide left-box"}).find("a", {"class": "username"}).text
        user.append(author)
        num_post = literature.find("dd", {"class": "posts"}).text[1:-3]
        post.append(num_post)
        num_view = literature.find("dd", {"class": "views"}).text[1:-3]
        view.append(num_view)
        if int(num_post) != 0:
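            # Note: the last-post author is appended to the same user list,
            # so user can end up longer than topic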
            reply = literature.find("dd", {"class": "lastpost"}).find("a", {"class": "username"}).text
            user.append(reply)
        date_added = literature.find("time").text
        addDate.append(date_added)
    nm = len(topic)

    # Returning the collected fields (assumed row shape, mirroring the description parser)
    return (board, nm, topic, user, post, view, addDate, href)


def abyssForum_links_parser(soup):

    # Returning all links that should be visited by the Crawler

    href = []
    # print(soup.find('table', {"class": "tborder clear"}).find(
    #     'tbody').find_all('tr', {"class": "inline_row"}))
    listing = soup.find_all('dl', {"class": "row-item topic_read"})

    for a in listing:
        link = a.find('div', {"class": "list-inner"}).find('a').get('href')

        href.append(link)

    return href
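
# A minimal usage sketch (hypothetical, not part of the crawler pipeline):
# collect topic links from a locally saved Listing Page. The file name
# "abyssforum_listing.html" is an assumption for illustration only.
#
# with open("abyssforum_listing.html", encoding="utf-8") as f:
#     for link in abyssForum_links_parser(BeautifulSoup(f.read(), "html.parser")):
#         print(link)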