@@ -0,0 +1,289 @@
__author__ = 'DarkWeb'

'''
Incogsnoo Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Incogsnoo.parser import incogsnoo_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/'
# Opens Tor Browser, crawls the website, then parses the results, then closes Tor.
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it.
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(forumName, baseURL, True)
# Returns the name of the website
# return: name of site in string type
def getForumName():
    name = 'Incogsnoo'
    return name


# Return the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/'
    return url
# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates FireFox 'driver' and configure its 'Profile'
# to use Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)  # might need to turn off
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
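    # Note: 9150 is the SOCKS port used by the Tor Browser bundle; a standalone tor
    # daemon usually listens on 9050 instead, so this value depends on the local setup.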
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
# The driver 'gets' the url, attempting to access the site; if it cannot, return 'down'
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath
# Creates the file name from the passed URL; falls back to a running counter
# when the cleaned URL yields an empty name
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
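# Illustrative example (not part of the pipeline): for the Malware listing URL used in
# getInterestedLinks() below, the alphanumeric-only name would be
# 'httptedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytadonionrMalware'.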
# Returns the list of urls of interest; the crawler runs through this list.
# In this example, the links cover categories such as exploits, malware, and hacking tutorials.
def getInterestedLinks():
    links = []

    # Malware
    links.append('http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/r/Malware')
    #

    return links
# Gets the links of interest and iterates through them; each link is visited and crawled.
# Both listing (topic index) and description (thread) pages are crawled and saved here.
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the Incogsnoo forum")

    # edge cases:
    # 1. if a comment thread goes too deep, "continue this thread" must be clicked to show more replies
    # 2. the site will sometimes rate limit you and not show the contents;
    #    right now there is no detection mechanism and it won't throw any errors
    #    (see the isRateLimited sketch below crawlForum)
    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()

                        if isListingLink(driver.current_url):
                            break

                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # testing limit: only save the first two pages of a topic (comment out for a full crawl)
                        if counter == 2:
                            break

                        try:
                            # incogsnoo (teddit) usually has no 'next' button on description pages;
                            # if one exists, follow it (stored in 'page' so the listing url in 'link' is preserved)
                            link_tag = driver.find_element(by=By.XPATH, value="/html/body/div[2]/div[last()]/a[contains(text(),'next')]")
                            page = link_tag.get_attribute("href")
                            if page == "":
                                raise NoSuchElementException
                            counter += 1

                        except NoSuchElementException:
                            has_next_topic_page = False

                    # making sure we go back to the listing page (browser back button simulation)
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()

                # testing limit: stop after the first topic (uncomment for a quick test)
                # break

                # testing limit: stop after two listing pages (comment out for a full crawl)
                if count == 1:
                    break

                try:
                    link_tag = driver.find_element(by=By.XPATH, value="/html/body/div[2]/div[last()]/a[contains(text(),'next')]")
                    link = link_tag.get_attribute("href")
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Incogsnoo forum done.")
# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'comments' in url:
        return True
    return False


# Returns True if the link is a listingPage link
# @param: url of any url crawled
# return: true if is a Listing page, false if not
def isListingLink(url):
    if isDescriptionLink(url):
        return False
    return True
# Calls the parser to extract description links from a listing page
# @param: html source of a page from the interested link list
# return: list of description links that should be crawled through
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    #print(soup.find('div', {"class": "forumbg"}).find('ul', {"class": "topiclist topics"}).find('li', {"class": "row bg1"}).find('a', {"class": "topictitle"}, href=True))
    return incogsnoo_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing BestCardingWorld .... DONE!")
@@ -0,0 +1,271 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages).
# It takes the soup object of a description page and extracts the fields it needs,
# storing them in separate lists that are organized and returned together.
# @param: soup object looking at the html page of a description page
# return: 'row' that contains a variety of lists that each hold info on the description page
def incogsnoo_description_parser(soup):
    # Fields to be parsed

    topic = "-1"        # 0 topic name ***$
    user = []           # 1 all users of each post ***$ author
    status = []         # 2 all users' authority in each post such as (adm, member, dangerous)
    reputation = []     # 3 all users' karma in each post (usually found as a number) ??? ups
    interest = []       # 4 all users' interests in each post
    sign = []           # 5 all users' signatures in each post (usually a standard message after the content of the post)
    post = []           # 6 all messages of each post
    feedback = []       # 7 all feedbacks of each user (this was found in just one Forum and with a number format)
    addDate = []        # 8 all dates of each post ***$ created
    image_user = []     # 9 all user avatars of each post
    image_post = []     # 10 all first images of each post

    # Finding the topic (should be just one coming from the Listing Page)
    topic = soup.find("div", {"class": "title"}).find("h2").text
    topic = topic.replace('"', '')
    topic = cleanString(topic.strip())

    # The first post's html is separated from all subsequent comments/replies,
    # so parse the first post by itself first.

    # Finding the body of the first post
    post_text = soup.find("div", {"class": "md"})
    if post_text:
        post_text = post_text.text.strip()
        post.append(cleanString(post_text))
    else:  # some posts only link to other sites/articles/videos and have no text of their own
        post_link = soup.find("div", {"class": "title"}).find("a").get("href")
        post_link = cleanLink(post_link)
        post.append(post_link)

    # Finding the user of the first post
    p_tag = soup.find("p", {"class": "submitted"})
    author = p_tag.find("a")
    if author:
        author = author.text.strip()
    elif "[deleted]" in p_tag.text:
        author = "deleted"
    else:
        author = "-1"
    user.append(cleanString(author))

    # Finding the status of the author
    status.append("-1")

    # Finding the reputation of the user
    reputation.append("-1")

    # Finding the interest of the author
    interest.append("-1")

    # Finding signature
    sign.append("-1")

    # Finding feedback (upvotes)
    upvote = soup.find("div", {"class": "score"}).find("span")
    if upvote:
        upvote = upvote.text.strip()
    else:
        upvote = "-1"
    feedback.append(cleanString(upvote))

    # Finding the date of the post - e.g. "Mon, 18 Dec 2023 05:49:20 GMT"
    dt = soup.find("p", {"class": "submitted"}).find("span")["title"]
    # Convert to datetime object - e.g. 2023-12-18 05:49:20
    date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')
    sdate = date_time_obj.strftime('%m %d %Y')
    stime = date_time_obj.strftime('%I:%M %p')
    date = convertDate(sdate, "english", datetime.now()) + " " + stime
    # e.g. "12/18/2023 05:49 AM"
    addDate.append(date)

    image_user.append("-1")
    image_post.append("-1")
posts = soup.find("div", {"class": "comments"}).findAll("details") | |||||
# For each message (post), get all the fields we are interested to: | |||||
for ipost in posts: | |||||
# Finding user | |||||
p_tag = ipost.find("p", {"class": "author"}) | |||||
author = p_tag.find("a") | |||||
if author: | |||||
author = author.text.strip() | |||||
elif "[deleted]" in p_tag.text: | |||||
author = "deleted" | |||||
else: | |||||
author = "-1" | |||||
user.append(cleanString(author)) | |||||
# Finding the status of the author | |||||
status.append("-1") | |||||
# Finding the reputation of the user | |||||
reputation.append("-1") | |||||
# Finding the interest of the author | |||||
interest.append("-1") | |||||
# Finding signature | |||||
sign.append("-1") | |||||
# Finding the post | |||||
comment = ipost.find("div", {"class": "md"}) | |||||
if comment: | |||||
comment = comment.text.strip() | |||||
else: | |||||
comment = "-1" | |||||
post.append(cleanString(comment)) | |||||
# Finding feedback | |||||
upvote = ipost.find("p", {"class": "ups"}) | |||||
if upvote: | |||||
upvote = upvote.text.strip().split()[0] | |||||
else: | |||||
upvote = "-1" | |||||
feedback.append(cleanString(upvote)) | |||||
# Finding the date of the post - e.g. "Fri, 18 December 2023 05:49:20 GMT" | |||||
dt = ipost.find("p", {"class": "created"})["title"] | |||||
# Convert to datetime object - e.g. 2023-12-18 05:49:20 | |||||
date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z') | |||||
sdate = date_time_obj.strftime('%m %d %Y') | |||||
stime = date_time_obj.strftime('%I:%M %p') | |||||
date = convertDate(sdate, "english", datetime.now()) + " " + stime | |||||
# e.g. "12/18/2023 05:49 AM" | |||||
addDate.append(date) | |||||
image_user.append("-1") | |||||
image_post.append("-1") | |||||
# Populate the final variable (this should be a list with all fields scraped) | |||||
row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post) | |||||
# Sending the results | |||||
return row | |||||
# This is the method to parse the Listing Pages (one page with many topics).
# It takes the soup object of a listing page and extracts the fields it needs,
# storing them in separate lists that are organized and returned together.
# @param: soup object looking at the html page of a listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def incogsnoo_listing_parser(soup):
    nm = 0                  # *this variable should receive the number of topics
    forum = "Incogsnoo"     # 0 *forum name
    board = "-1"            # 1 *board name (the previous level of the topic in the Forum categorization tree.
                            # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []             # 2 *all authors of each topic
    topic = []              # 3 *all topics
    views = []              # 4 number of views of each topic
    posts = []              # 5 number of posts of each topic
    href = []               # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                            # Listing and Description pages)
    addDate = []            # 7 when the topic was created (difficult to find)
    image_author = []       # 8 all author avatars used in each topic

    # Finding the board (should be just one)
    board = soup.find("a", {"class": "subreddit"}).find("h2")
    board = cleanString(board.text.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find("div", {"id": "links", "class": "sr"}).findAll("div", {"class": "link"})
    itopics.pop()

    # Counting how many topics we have found so far
    nm = len(itopics)

    index = 0
    for itopic in itopics:

        # Finding the author of the topic
        p_tag = itopic.find("p", {"class": "submitted"})
        user = p_tag.find("a")
        if user:
            user = user.text.strip()
        elif "[deleted]" in p_tag.text:
            user = "deleted"
        else:
            user = "-1"
        author.append(cleanString(user))

        # Adding the topic to the topic list
        topic_title = itopic.find("div", {"class": "title"}).find("h2").text
        topic.append(cleanString(topic_title))

        # Finding the number of Views
        views.append("-1")

        # Finding the number of posts
        comments = itopic.find("a", {"class": "comments"}).text
        number_comments = comments.split()[0]
        posts.append(cleanString(number_comments))

        # Adding the url to the list of urls
        link = itopic.find("a", {"class": "comments"}).get("href")
        link = cleanLink(link)
        href.append(link)

        # Finding dates
        p_tag = itopic.find("p", {"class": "submitted"})
        dt = p_tag.find("span")["title"]
        date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')
        sdate = date_time_obj.strftime('%m %d %Y')
        stime = date_time_obj.strftime('%I:%M %p')
        date = convertDate(sdate, "english", datetime.now()) + " " + stime
        # e.g. "12/18/2023 05:49 AM"
        addDate.append(date)

        image_author.append("-1")

        index += 1

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate, image_author)
# Called by the crawler to get description links from a listing page
# @param: beautifulsoup object of the correct html page (listing page)
# return: list of description links from a listing page
def incogsnoo_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing_parent = soup.find("div", {"id": "links", "class": "sr"})
    listing = listing_parent.findAll("div", {"class": "entry"})
    count = 0

    for entry in listing:
        parent_div = entry.find("div", {"class": "meta"}).find("div", {"class": "links"})
        a_tag = parent_div.find("a", {"class": "comments"})
        if a_tag:
            href.append(a_tag.get("href"))

        # if count == 10:
        #     break

        count += 1

    return href
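
# Illustrative helper (not called by the pipeline): shows how a saved listing page would be fed
# back through the parsers above. In the real pipeline, Forums.Initialization.prepare_parser
# locates the saved HTML and calls these functions itself; the helper name and the path argument
# are assumptions for demonstration only.
def parse_saved_listing(html_path):
    with open(html_path, "rb") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    row = incogsnoo_listing_parser(soup)    # one organized row describing the listing page
    links = incogsnoo_links_parser(soup)    # description links the crawler would visit next
    return row, links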
@@ -0,0 +1,352 @@
__author__ = 'DarkWeb'

'''
TheDarkMarket Marketplace Crawler (Selenium)
(adapted from the Royal marketplace crawler)
'''

from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.TheDarkMarket.parser import darkmarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://dark3xolguutzr2cn5twjyu6c3db2z3ai3aqyqascml5cdrleh3s2hqd.onion/'
# Opens Tor Browser, crawls the website
def startCrawling():
    marketName = getMarketName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(marketPlace=marketName, url=baseURL, createLog=True)
def captcha(driver):
    '''
    # wait for captcha page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div/div/div/form/div/div[2]/button")))

    inputChars = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div/div/form/div/div[2]/div[1]/input')
    inputNum = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div/div/form/div/div[2]/div[2]/input')

    driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div/div/form/div/div[1]/div/div').screenshot(
        r'..\Royal\captcha1.png')

    im = Image.open(r'..\Royal\captcha1.png')
    im.show()

    chars = input("Enter characters: ")
    inputChars.send_keys(chars)

    num = input("Enter number of wrong puzzle pieces: ")
    inputNum.send_keys(num)

    # click the verify(submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div[2]/div/div/div/div/form/div/div[2]/button").click()
    '''
    input("Press ENTER when CAPTCHA is completed\n")

    # wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div/div[2]/h1")))

    '''
    temp = driver.find_element(by=By.XPATH, value='/html/body/div/div/form/div[1]')
    boxes = temp.find_elements(by=By.TAG_NAME, value='input')

    for box in boxes:
        # click box to update captcha image
        box.click()
        # save clock captcha to local
        time.sleep(1)
        driver.find_element(by=By.XPATH, value='/html/body/div/div/form/div[1]/div').screenshot(
            r'..\Royal\captcha1.png')

        im = Image.open(r'..\Royal\captcha1.png')
        im.show()

        letter = input("Enter letter: ")
        box.send_keys(letter)

    # click the verify(submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/button[1]").click()

    # wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[1]/div/div/div[2]/form/input[3]")))
    '''
# Login using premade account credentials and do the login captcha manually
def login(driver):
    # wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div/div[2]/form/div[4]")))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('blabri')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('fishowal')

    # click "Login"
    driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div[2]/form/div[4]').click()

    '''
    # wait for captcha page to show up
    time.sleep(3)

    # save captcha to local
    driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div[2]/form/div[4]/label/div/div').screenshot(
        r'..\Royal\captcha2.png')

    # This method will show image in any image viewer
    im = Image.open(r'..\Royal\captcha2.png')
    im.show()

    # ask user to input captcha solution in terminal
    userIn = input("Enter location of wrong pieces (squares are numbered 1-24 left to right, # # #): ")
    squares = userIn.split()

    # send user solution into the input space
    for id in squares:
        driver.find_element(by=By.XPATH, value='//*[@id="cl[' + str((int(id)-1)) + ']"]').click()

    # click the verify(submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div[2]/div/div/div[2]/form/div[4]/label/div/div/div/button").click()
    '''
    input("Press ENTER when CAPTCHA is completed\n")

    # wait for listing page to show up (this XPath may need to change based on a different seed url)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[3]/div/div[5]/div[1]')))
# Returns the name of the website
def getMarketName():
    name = 'TheDarkMarket'
    return name


# Return the link of the website
def getFixedURL():
    url = 'http://dark3xolguutzr2cn5twjyu6c3db2z3ai3aqyqascml5cdrleh3s2hqd.onion/'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates FireFox 'driver' and configure its 'Profile'
# to use Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMarketName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if not isListingLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath


# Creates the file name from passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
def getInterestedLinks():
    links = []

    # Hacking
    links.append(baseURL + 'product-category/hacking/')
    # the commented links below are left over from the Royal marketplace version of this crawler
    # # Digital - Guides and Tutorials
    # links.append('http://royalrnpvfbodtt5altnnzano6hquvn2d5qy55oofc2zyqciogcevrad.onion/category/Guides%20&%20Tutorials')
    # # Digital - Legitimate Software
    # links.append('http://royalrnpvfbodtt5altnnzano6hquvn2d5qy55oofc2zyqciogcevrad.onion/category/Legitimiate%20Software')
    # # Services - Carding
    # links.append('http://royalrnpvfbodtt5altnnzano6hquvn2d5qy55oofc2zyqciogcevrad.onion/category/Carding')

    return links
def crawlForum(driver):
    print("Crawling The Dark Market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                products = productPages(html)
                for item in products:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # testing limit: stop after the first product (uncomment for a quick test)
                    # break

                # testing limit: stop after two listing pages (uncomment for a quick test)
                # if count == 1:
                #     break

                # Try finding the next page
                try:
                    nav = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div[1]/div[2]/nav')
                    li = nav.find_elements(by=By.TAG_NAME, value='li')
                    a = li[-1].find_element(by=By.TAG_NAME, value='a')
                    link = a.get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    input("Crawling The Dark Market done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a product description link
def isDescriptionLink(url):
    if '/product/' in url:
        return True
    return False


# Returns True if the link is a listingPage link
def isListingLink(url):
    if 'category' in url:
        return True
    return False


# Calls the parser to extract product links from a listing page
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return darkmarket_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing BestCardingWorld .... DONE!")
@@ -0,0 +1,182 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup, ResultSet, Tag


# This is the method to parse the Description Pages (one page to each Product in the Listing Pages)
def darkmarket_description_parser(soup: BeautifulSoup):
    # Fields to be parsed

    vendor = "-1"           # 0 *Vendor_Name
    success = "-1"          # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"    # 2 Vendor_Rating
    name = "-1"             # 3 *Product_Name
    describe = "-1"         # 4 Product_Description
    CVE = "-1"              # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"               # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"         # 7 Product_Category
    views = "-1"            # 8 Product_Number_Of_Views
    reviews = "-1"          # 9 Product_Number_Of_Reviews
    rating_item = "-1"      # 10 Product_Rating
    addDate = "-1"          # 11 Product_AddedDate
    BTC = "-1"              # 12 Product_BTC_SellingPrice
    USD = "-1"              # 13 Product_USD_SellingPrice
    EURO = "-1"             # 14 Product_EURO_SellingPrice
    sold = "-1"             # 15 Product_QuantitySold
    left = "-1"             # 16 Product_QuantityLeft
    shipFrom = "-1"         # 17 Product_ShippedFrom
    shipTo = "-1"           # 18 Product_ShippedTo
    image = "-1"            # 19 Product_Image
    image_vendor = "-1"     # 20 Vendor_Image

    details: Tag = soup.find("div", {"class": "wc-content"})

    vendor = details.find("div", {"class": "product_meta"}).find("a", {"class": "wcvendors_cart_sold_by_meta"}).text

    name = details.find("h1", {"class": "product_title entry-title"}).text

    describe_list = [
        elem.text for elem in
        details.find("div", {"id": "tab-description"}).find_all()
        if elem.name != "h2"
    ]
    describe = " ".join(describe_list)

    categories_list: ResultSet[Tag] = details.find("span", {"class": "posted_in"}).find_all("a")  # parsed but currently unused
    # the category is fixed here because only the Hacking listing is crawled (see getInterestedLinks in the crawler)
    category = "Hacking"

    reviews = details.find("div", {"class": "review-link"}).get("title")

    rating_item = details.find("div", {"class": "star-rating"}).get('title')

    # WooCommerce wraps the discounted price of an on-sale product in <ins>,
    # so prefer the <ins> amount when it is present
    price_container = details.find("p", {"class": "price"})
    if not price_container.find("ins"):
        USD = price_container.find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")
    else:
        USD = price_container.find("ins").find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")

    # print(f"\n[desc] Product: {name}")
    # print(f"[desc] Price: ${USD}\n")

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, image_vendor)

    # Sending the results
    return row
# This is the method to parse the Listing Pages
def darkmarket_listing_parser(soup: BeautifulSoup):
    # Fields to be parsed
    nm = 0                          # *Total_Products (Should be Integer)
    mktName = "TheDarkMarket"       # 0 *Marketplace_Name
    vendor = []                     # 1 *Vendor y
    rating_vendor = []              # 2 Vendor_Rating
    success = []                    # 3 Vendor_Successful_Transactions
    name = []                       # 4 *Product_Name y
    CVE = []                        # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                         # 6 Product_MS_Classification (Microsoft Security)
    category = []                   # 7 Product_Category y
    describe = []                   # 8 Product_Description
    views = []                      # 9 Product_Number_Of_Views
    reviews = []                    # 10 Product_Number_Of_Reviews
    rating_item = []                # 11 Product_Rating
    addDate = []                    # 12 Product_AddDate
    BTC = []                        # 13 Product_BTC_SellingPrice
    USD = []                        # 14 Product_USD_SellingPrice y
    EURO = []                       # 15 Product_EURO_SellingPrice
    sold = []                       # 16 Product_QuantitySold
    qLeft = []                      # 17 Product_QuantityLeft
    shipFrom = []                   # 18 Product_ShippedFrom
    shipTo = []                     # 19 Product_ShippedTo
    image = []
    image_vendor = []
    href = []                       # 20 Product_Links
    products_list: ResultSet[Tag] = soup.find("ul", {"class": "products columns-3"}).find_all("li")

    for product in products_list:
        nm += 1

        product_vendor = product.find("small", {"class": "wcvendors_sold_by_in_loop"}).find("a").text
        vendor.append(cleanString(product_vendor))

        # rating_vendor.append("-1")
        # success.append("-1")

        product_name = product.find("h2", {"class": "woocommerce-loop-product__title"}).text
        name.append(cleanString(product_name))

        # CVE.append("-1")
        # MS.append("-1")

        product_category = product.find("div", {"class": 'product-categories'}).text
        category.append(cleanString(product_category))

        # describe.append("-1")
        # views.append("-1")
        # reviews.append("-1")

        product_rating = product.find("div", {"class": "star-rating"}).get("title")
        rating_item.append(cleanString(product_rating))

        # addDate.append(datetime.now().strftime("%m/%d/%Y "))
        # BTC.append("-1")

        price_container = product.find("span", {"class": "price"})
        if not price_container.find("ins"):
            product_price = price_container.find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")
        else:
            product_price = price_container.find("ins").find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")
        USD.append(cleanNumbers(product_price))

        # EURO.append("-1")
        # sold.append("-1")
        # qLeft.append("-1")
        # shipTo.append("-1")
        # shipFrom.append("-1")

        product_href = product.find("a", {"class": "woocommerce-LoopProduct-link woocommerce-loop-product__link"}).get("href")
        href.append(product_href)

        # print(f"\n[list] Product: {product_name}")
        # print(f"[list] Links: ${product_href}\n")

        product_images_list = product.find("a", {"class": "tf-loop-product-thumbs-link"}).find("img").get("data-srcset").split(" ")
        product_image = product_images_list[0]
        image.append(product_image)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
def darkmarket_links_parser(soup: BeautifulSoup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing: ResultSet[Tag] = soup.find("ul", {"class": "products columns-3"}).find_all("li")

    for li in listing:
        a = li.find('a', {"class": "woocommerce-LoopProduct-link woocommerce-loop-product__link"})
        link = a.get('href')
        href.append(link)

    print(f"Links: {href}")

    return href
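
# Illustrative refactor sketch (not used by the functions above): the sale-price handling
# appears in both the description and the listing parser and could be factored into one
# helper. WooCommerce wraps the discounted price of an on-sale product in <ins>, so that
# amount is preferred when present. The helper name is an assumption, not part of the code base.
def _extract_usd_price(price_container: Tag) -> str:
    scope = price_container.find("ins") or price_container
    amount = scope.find("span", {"class": "woocommerce-Price-amount amount"})
    return amount.text.replace("$", "") if amount else "-1"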