__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import date
from datetime import datetime
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
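
# Helper to expand XenForo-style abbreviated counts ("1.5K" -> "1500"), used by
# the listing parser below. NOTE: this is a sketch under the assumption that
# DWForums only abbreviates thousands with a trailing "K"; a naive
# str.replace("K", "000") would turn "1.5K" into "1.5000". The name convertK is
# ours, not part of the utilities module.
def convertK(count):
    count = count.strip()
    if count.upper().endswith("K"):
        return str(int(float(count[:-1]) * 1000))
    return count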

# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)

def dwForums_description_parser(soup):

    # Fields to be parsed

    topic = "-1"            # 0 *topic name
    user = []               # 1 *all users of each post
    status = []             # 2 all user's authority in each post such as (adm, member, dangerous)
    reputation = []         # 3 all user's karma in each post (usually found as a number)
    interest = []           # 4 all user's interest in each post
    sign = []               # 5 all user's signature in each post (usually a standard message after the content of the post)
    post = []               # 6 all messages of each post
    feedback = []           # 7 all feedbacks of each vendor (this was found in just one Forum and with a number format)
    addDate = []            # 8 all dates of each post

    # Finding the topic (should be just one coming from the Listing Page)

    heading = soup.find("h1", {"class": "p-title-value"})

    topic = heading.text
    topic = topic.replace(u'\xa0', ' ')
    topic = topic.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())

    # Finding the repeated tag that corresponds to the listing of posts


    posts = soup.find('div', {"class": "js-replyNewMessageContainer"}).find_all(
        'article', {"class": "js-post"}, recursive=False)
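    # Sketch of the assumed XenForo 2 thread markup these selectors target
    # (class lists abbreviated; inferred from the selectors above, not
    # verified against a live page):
    #   <div class="... js-replyNewMessageContainer">
    #       <article class="message ... js-post">...</article>
    #       ...
    #   </div>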

    # For each message (post), get all the fields we are interested in:

    for ipost in posts:

        # Finding a first level of the HTML page

        post_wrapper = ipost.find('h4', {"class": "message-name"})

        # Finding the author (user) of the post

        author = post_wrapper.text.strip()
        user.append(cleanString(author))  # Remember to clean the problematic characters

        # Finding the status of the author

        # DWForums exposes the member group via the userTitle heading; unlike
        # CryptBB, it has no separate post group

        membergroup = ipost.find('h5', {"class": "userTitle"})
        if membergroup is not None:
            membergroup = membergroup.text.strip()
        else:
            membergroup = "-1"

        status.append(cleanString(membergroup))

        # Finding the interest of the author
        # DWForums has no "blurb" (interest) field, so this usually falls back to "-1"
        blurb = ipost.find('li', {"class": "blurb"})
        if blurb is not None:
            blurb = blurb.text.strip()
        else:
            blurb = "-1"
        interest.append(cleanString(blurb))

        # Finding the reputation of the user (the "Reaction score" pair in the
        # message-userExtras block)
        author_stats = ipost.find('div', {"class": "message-userExtras"})
        karma = None
        if author_stats is not None:
            pairs = author_stats.find_all('dl', {"class": "pairs"})
            if len(pairs) > 2:
                karma = pairs[2]    # the third pair holds the reaction score
        if karma is not None:
            karma = karma.text
            karma = karma.replace("Reaction score", "")
            karma = karma.replace(":", "")
            karma = karma.strip()
        else:
            karma = "-1"
        reputation.append(cleanString(karma))
        # Getting here another good tag to find the post date, post content and user's signature

        postarea = ipost.find('div', {"class": "message-attribution-main"})

        # Finding the date of the post. XenForo stores an ISO-8601 timestamp in
        # the <time> tag's datetime attribute (e.g. "2021-07-04T12:30:00-0400"),
        # so relative dates such as "Yesterday" or "3 hours ago" never appear
        # here and need no special handling
        dt = postarea.find('time', {"class": "u-dt"})['datetime']
        dt = dt.strip()[:16]        # keep only "YYYY-MM-DDTHH:MM"
        dt = dt.replace("T", ", ")
        date_time_obj = datetime.strptime(dt, '%Y-%m-%d, %H:%M')

        addDate.append(date_time_obj)

        # Finding the post

        inner = ipost.find('article', {"class": "message-body"})
        inner = inner.text.strip()
        post.append(cleanString(inner))

        # Finding the user's signature

        signature = ipost.find('aside', {"class": "message-signature"})
        if signature is not None:
            signature = signature.text.strip()
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        # As no information about the users' feedback was found, just assign "-1" to the variable

        feedback.append("-1")

    # Populate the final variable (a tuple with all the scraped fields)

    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results

    return row
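
# Example usage (a sketch -- the file name is hypothetical; assumes a DWForums
# thread page saved to disk):
#   with open("thread.html", encoding="utf-8") as f:
#       soup = BeautifulSoup(f.read(), "html.parser")
#   row = dwForums_description_parser(soup)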

# This is the method to parse the Listing Pages (one page listing many topics)

def dwForums_listing_parser(soup):

    nm = 0              # *this variable should receive the number of topics
    forum = "DWForums"  # 0 *forum name
    board = "-1"        # 1 *board name (the previous level of the topic in the Forum categorization tree.
                        # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []         # 2 *all authors of each topic
    topic = []          # 3 *all topics
    views = []          # 4 number of views of each topic
    posts = []          # 5 number of posts of each topic
    href = []           # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                        # Listing and Description pages)
    addDate = []        # 7 when the topic was created (difficult to find)

    # Finding the board (should be just one)

    board = soup.find('h1', {"class": "p-title-value"}).text
    board = cleanString(board.strip())

    # Finding the repeated tag that corresponds to the listing of topics

    regex = re.compile('.*structItem--thread.*')
    itopics = soup.find_all("div", {"class": regex})
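
    # Sketch of the assumed listing markup (class lists abbreviated; inferred
    # from the selectors below, not verified against a live page):
    #   <div class="structItem structItem--thread ...">
    #       <div class="structItem-title">...</div>
    #       <div class="structItem-minor">...</div>
    #       <div class="structItem-cell structItem-cell--meta">...</div>
    #   </div>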

    for itopic in itopics:

        # Adding the topic to the topic list
        topics = itopic.find("div", {"class": "structItem-title"}).text
        topics = topics.replace(",", "")
        topics = topics.replace("\n", "")
        topic.append(cleanString(topics.strip()))

        # Counting how many topics we have found so far

        nm = len(topic)

        # Adding the url to the list of urls
        link = itopic.select_one('a[href^="/threads/"]')
        link = link['href']
        link = cleanLink(link)
        href.append(link)

        # Finding the author of the topic
        minor = itopic.find('div', {"class": "structItem-minor"})
        ps = minor.find('li').text
        user = ps.strip()
        author.append(cleanString(user))

        # Finding the number of replies
        meta = itopic.find("div", {"class": "structItem-cell--meta"})
        meta = meta.find_all("dl")
        post = meta[0].find("dd").text
        post = convertK(post)   # expand abbreviated counts such as "1.5K"
        posts.append(cleanString(post))

        # Finding the number of views
        tview = meta[1].find("dd").text
        tview = convertK(tview)
        views.append(cleanString(tview))

        # Finding when the topic was added. As in the description parser, the
        # <time> tag carries an ISO-8601 datetime attribute, so relative dates
        # need no special handling
        dt = minor.find('time')['datetime']
        dt = dt.strip()[:16]        # keep only "YYYY-MM-DDTHH:MM"
        dt = dt.replace("T", ", ")
        date_time_obj = datetime.strptime(dt, '%Y-%m-%d, %H:%M')
        addDate.append(date_time_obj)

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate)
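
# Example usage (a sketch -- the file name is hypothetical; assumes a DWForums
# board listing page saved to disk):
#   with open("board.html", encoding="utf-8") as f:
#       soup = BeautifulSoup(f.read(), "html.parser")
#   row = dwForums_listing_parser(soup)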


def dwForums_links_parser(soup):

    # Returning all links that should be visited by the Crawler

    href = []

    regex = re.compile('.*structItem--thread.*')
    listing = soup.find_all("div", {"class": regex})

    for thread in listing:
        link = thread.select_one('a[href^="/threads/"]')
        link = link['href']
        href.append(link)

    return href
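
# Example usage (a sketch -- assumes the same saved listing page as above):
#   links = dwForums_links_parser(soup)
#   for link in links:
#       print(link)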