@@ -0,0 +1,330 @@
__author__ = 'DarkWeb'

'''
Cardingleaks Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Cardingleaks.parser import cardingleaks_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'https://cardingleaks.ws/'

# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(forumName, baseURL, False)

# Opens Tor Browser
def opentor():
    from Forums.Initialization.forums_mining import config

    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return

# Login using premade account credentials; the login captcha must be solved manually
def login(driver):
    # find the login button and grab the login page URL
    login_link = driver.find_element(
        by=By.XPATH, value='/html/body/div[2]/div[1]/nav/div/div[3]/div[1]/a[1]').\
        get_attribute('href')
    driver.get(login_link)  # open the login page

    # entering username and password into the input boxes
    usernameBox = driver.find_element(by=By.NAME, value='login')
    # Username here
    usernameBox.send_keys('somanyfrogs')  # sends string to the username box
    passwordBox = driver.find_element(by=By.NAME, value='password')
    # Password here
    passwordBox.send_keys('therearewaytoomanyherehowwhy')  # sends string to the password box

    # submit the login form
    login = driver.find_element(by=By.CLASS_NAME, value='block-container')
    login_link = login.find_element(by=By.TAG_NAME, value='button')
    login_link.click()

    # pause so the login captcha can be solved manually
    input('Solve the captcha in the browser, then press ENTER to continue\n')

    # wait up to 50 seconds until the listing page shows up
    # (this locator may need to change for a different seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.CLASS_NAME, 'p-body-pageContent')))

# Returns the name of the website
def getForumName() -> str:
    name = 'Cardingleaks'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'https://cardingleaks.ws/'
    return url


# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close the current tab
    time.sleep(3)
    return

# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    # route all traffic through the local Tor SOCKS proxy (9150 is the Tor Browser default port)
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver

# Opens the seed URL; returns the driver on success or 'down' on failure
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'

# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
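
# Example layout (illustration only, assuming the config values are set):
#   <shared_folder>/Forums/Cardingleaks/HTML_Pages/<CURRENT_DATE>/Listing/<fileName>.html
#   <shared_folder>/Forums/Cardingleaks/HTML_Pages/<CURRENT_DATE>/Description/<fileName>.html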

# Creates the file name from passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
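
# Illustration only (not in the original code): the file name is simply the URL reduced to its
# alphanumeric characters, e.g.
#   getNameFromURL('https://cardingleaks.ws/forums/carding-methods.82/')
#   -> 'httpscardingleakswsforumscardingmethods82'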

def getInterestedLinks():
    links = []

    # carding methods
    links.append('https://cardingleaks.ws/forums/carding-methods.82/')
    # # carding schools
    # links.append('https://cardingleaks.ws/forums/help-desk-carding-school.35/')
    # # carding discussion
    # links.append('https://cardingleaks.ws/forums/carding-discussion-desk.58/')
    # # carding tutorials
    # links.append('https://cardingleaks.ws/forums/carding-tutorials.13/')
    # # carding tools and software
    # links.append('https://cardingleaks.ws/forums/carding-tools-softwares.10/')
    # # exploits and cracking tools
    # links.append('https://cardingleaks.ws/forums/exploits-cracking-tools.22/')

    return links

def crawlForum(driver):
    print("Crawling the Cardingleaks forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                topicList = topicPages(html)
                for item in topicList:

                    # variable to check if there is a next page for the topic
                    has_next_topic_page = True
                    counter = 1

                    # save every page of the topic
                    while has_next_topic_page:
                        # try to access the next page of the topic
                        itemURL = urlparse.urljoin(baseURL, str(item))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver.page_source, item)

                        # if there is a next page of the topic, follow it and save it as well
                        try:
                            # temp = driver.find_element(By.XPATH, '/html/body/div[2]/div[4]/div/div[5]/div[2]/div/div[1]/div[1]/div/nav/div[1]')
                            item = driver.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                            if item == "":
                                raise NoSuchElementException
                            else:
                                counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # return to the listing page (one back() per topic page visited)
                    for j in range(counter):
                        driver.back()

                    # for testing; comment this break out to crawl every topic on the page
                    break

                # for testing; comment this block out to crawl every listing page
                if count == 1:
                    count = 0
                    break

                try:
                    # temp = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div[4]/div/div[5]/div[2]/div/div/div[1]/div/nav/div[1]')
                    link = driver.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling Cardingleaks forum done successfully. Press ENTER to continue\n")

# Returns 'True' if the link is a Topic link; may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False
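
# Illustration only (not in the original code): with these checks,
#   'https://cardingleaks.ws/forums/carding-methods.82/'   -> listing page
#   'https://cardingleaks.ws/threads/some-topic.12345/'    -> description (topic) page
# (the second URL is a made-up example of the thread URL pattern)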

# calling the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return cardingleaks_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Cardingleaks .... DONE!")
@@ -0,0 +1,247 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
from datetime import date
from datetime import datetime
from datetime import timedelta
import re

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages)
def cryptBB_description_parser(soup):

    # Fields to be parsed

    topic = "-1"    # 0 *topic name
    user = []       # 1 *all users of each post
    status = []     # 2 all users' authority in each post such as (adm, member, dangerous)
    reputation = [] # 3 all users' karma in each post (usually found as a number)
    interest = []   # 4 all users' interests in each post
    sign = []       # 5 all users' signatures in each post (usually a standard message after the content of the post)
    post = []       # 6 all messages of each post
    feedback = []   # 7 all feedback of each vendor (this was found in just one forum and with a number format)
    addDate = []    # 8 all dates of each post

    # Finding the topic (should be just one coming from the Listing Page)

    li = soup.find("td", {"class": "thead"}).find('strong')
    topic = li.text
    topic = re.sub(r"\[\w*\]", '', topic)
    topic = topic.replace(",", "")
    topic = topic.replace("\n", "")
    topic = cleanString(topic.strip())

    # Finding the repeated tag that corresponds to the listing of posts

    # try:
    posts = soup.find('table', {"class": "tborder tfixed clear"}).find('td', {"id": "posts_container"}).find_all(
        'div', {"class": "post"})

    # For each message (post), get all the fields we are interested in:

    for ipost in posts:

        # Finding the first level of the HTML page
        post_wrapper = ipost.find('span', {"class": "largetext"})

        # Finding the author (user) of the post
        author = post_wrapper.text.strip()
        user.append(cleanString(author))  # Remember to clean the problematic characters

        # Finding the status of the author

        smalltext = ipost.find('div', {"class": "post_author"})

        '''
        # Testing here two possibilities to find this status and combine them
        if ipost.find('div', {"class": "deleted_post_author"}):
            status.append(-1)
            interest.append(-1)
            reputation.append(-1)
            addDate.append(-1)
            post.append("THIS POST HAS BEEN REMOVED!")
            sign.append(-1)
            feedback.append(-1)
            continue
        '''

        # CryptBB has both a membergroup and a postgroup

        membergroup = smalltext.find('div', {"class": "profile-rank"})
        postgroup = smalltext.find('div', {"class": "postgroup"})

        if membergroup is not None:
            membergroup = membergroup.text.strip()
            if postgroup is not None:
                postgroup = postgroup.text.strip()
                membergroup = membergroup + " - " + postgroup
        else:
            if postgroup is not None:
                membergroup = postgroup.text.strip()
            else:
                membergroup = "-1"

        status.append(cleanString(membergroup))

        # Finding the interest of the author
        # CryptBB does not have a blurb field, so this will usually stay at "-1"

        blurb = smalltext.find('li', {"class": "blurb"})

        if blurb is not None:
            blurb = blurb.text.strip()
        else:
            blurb = "-1"

        interest.append(cleanString(blurb))

        # Finding the reputation of the user
        # CryptBB does have reputation

        author_stats = smalltext.find('div', {"class": "author_statistics"})

        karma = author_stats.find('strong')

        if karma is not None:
            karma = karma.text
            karma = karma.replace("Community Rating: ", "")
            karma = karma.replace("Karma: ", "")
            karma = karma.strip()
        else:
            karma = "-1"

        reputation.append(cleanString(karma))

        # Getting here another good tag to find the post date, post content and users' signature

        postarea = ipost.find('div', {"class": "post_content"})

        dt = postarea.find('span', {"class": "post_date"}).text
        # dt = dt.strip().split()
        dt = dt.strip()
        day = date.today()
        if "Yesterday" in dt:
            yesterday = day - timedelta(days=1)
            yesterday = yesterday.strftime('%m-%d-%Y')
            stime = dt.replace('Yesterday,', '').strip()
            date_time_obj = yesterday + ', ' + stime
            date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
        elif "hours ago" in dt:
            day = day.strftime('%m-%d-%Y')
            date_time_obj = postarea.find('span', {"class": "post_date"}).find('span')['title']
            date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p')
        else:
            date_time_obj = datetime.strptime(dt, '%m-%d-%Y, %I:%M %p')
        sdate = date_time_obj.strftime('%b %d, %Y')
        stime = date_time_obj.strftime('%I:%M %p')

        addDate.append(date_time_obj)
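
        # Illustration only (not in the original code): the forum shows absolute dates such as
        # '05-25-2023, 09:14 PM', which the '%m-%d-%Y, %I:%M %p' format above parses into a
        # datetime; 'Yesterday, ...' and '... hours ago' labels are normalized first.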

        # Finding the post

        inner = postarea.find('div', {"class": "post_body scaleimages"})
        inner = inner.text.strip()
        post.append(cleanString(inner))

        # Finding the user's signature

        # signature = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "moderatorbar"}).find('div', {"class": "signature"})
        signature = ipost.find('div', {"class": "signature scaleimages"})
        if signature is not None:
            signature = signature.text.strip()
            # print(signature)
        else:
            signature = "-1"
        sign.append(cleanString(signature))

        # As no information about the user's feedback was found, just assign "-1" to the variable
        feedback.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)

    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

    # Sending the results
    return row


# This is the method to parse the Listing Pages (one page with many topics)
def cryptBB_listing_parser(soup):

    nm = 0              # *this variable should receive the number of topics
    forum = "CryptBB"   # 0 *forum name
    board = "-1"        # 1 *board name (the previous level of the topic in the Forum categorization tree.
                        # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []         # 2 *all authors of each topic
    topic = []          # 3 *all topics
    views = []          # 4 number of views of each topic
    posts = []          # 5 number of posts of each topic
    href = []           # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                        # Listing and Description pages)
    addDate = []        # 7 when the topic was created (difficult to find)

    # Finding the board (should be just one)

    board = soup.find('span', {"class": "active"}).text
    board = cleanString(board.strip())

    # Finding the repeated tag that corresponds to the listing of topics

    itopics = soup.find_all('tr', {"class": "inline_row"})

    for itopic in itopics:

        # For each topic found, the structure holding the rest of the information can be of two types.
        # Test both so as not to miss any topic.

        # Adding the topic to the topic list
        try:
            topics = itopic.find('span', {"class": "subject_old"}).find('a').text
        except:
            topics = itopic.find('span', {"class": "subject_new"}).find('a').text
        topics = re.sub(r"\[\w*\]", '', topics)
        topic.append(cleanString(topics))

        # Counting how many topics we have found so far
        nm = len(topic)

        # Adding the url to the list of urls
        try:
            link = itopic.find('span', {"class": "subject_old"}).find('a').get('href')
        except:
            link = itopic.find('span', {"class": "subject_new"}).find('a').get('href')
        href.append(link)

        # Finding the author of the topic
        ps = itopic.find('div', {"class": "author smalltext"}).find('a').text
        user = ps.strip()
        author.append(cleanString(user))

        # Finding the number of replies
        columns = itopic.findChildren('td', recursive=False)
        replies = columns[3].text
        posts.append(cleanString(replies))

        # Finding the number of views
        tview = columns[4].text
        views.append(cleanString(tview))

        # If there is no information about when the topic was added, just assign "-1" to the variable
        addDate.append("-1")

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate)


def cardingleaks_links_parser(soup):

    # Returning all links that should be visited by the Crawler

    href = []
    listing = soup.find_all('div', {"class": "structItem-title"})

    for a in listing:
        link = a.find('a').get('href')
        href.append(link)

    return href
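

# Illustration only (not part of the original module): a minimal sketch of how the link parser
# is used, mirroring topicPages() in the crawler. The file path below is a made-up example.
#
#   from bs4 import BeautifulSoup
#
#   with open('saved_listing_page.html', 'r', encoding='utf-8') as f:
#       soup = BeautifulSoup(f.read(), 'html.parser')
#   topic_links = cardingleaks_links_parser(soup)   # list of thread hrefs found on the page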