@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (venv)" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\Helium\anaconda3" project-jdk-type="Python SDK" />
 </project>
@@ -1,257 +0,0 @@
__author__ = '91Shadows'
'''
CryptBB Crawler (Mechanize)
'''

import codecs, os, re
import socks, socket, time
from datetime import date
import urllib.parse as urlparse
import http.client as httplib
import mechanize
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.BestCardingWorld.parser import bestcardingworld_links_parser

counter = 1
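
# Pin http.client to HTTP/1.0 so responses avoid chunked transfer-encoding and
# keep-alive (assumed rationale: the SOCKS-wrapped sockets set up below handle
# plain HTTP/1.0 more reliably through Tor).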
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'

baseURL = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5'
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150)


# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    getUrl()
    forumName = getForumName()
    br = getAccess()

    if br != 'down':
        crawlForum(br)
        new_parse(forumName, False)
        # new_parse(forumName, False)

    closetor()


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    path = open('../../path.txt').readline()
    pro = subprocess.Popen(path)
    pid = pro.pid
    time.sleep(7.5)
    input("Tor Connected. Press ENTER to continue\n")
    return


# Creates a connection through the Tor port
def getUrl(timeout=None):
    socket.socket = socks.socksocket
    socket.create_connection = create_connection
    return


# Makes the onion address request
def create_connection(address, timeout=None, source_address=None):
    sock = socks.socksocket()
    sock.connect(address)
    return sock
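

# Note: getUrl() above monkey-patches socket.socket and socket.create_connection
# so that every connection Mechanize opens is tunneled through the SOCKS5 proxy
# configured at module load. A minimal sanity check might look like this
# (a sketch, assuming Tor Browser is already listening on 127.0.0.1:9150):
#
#   getUrl()
#   br = createBrowser()
#   br.open('https://check.torproject.org')  # the page reports whether Tor is used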


# Returns the name of the website
def getForumName():
    name = 'CryptBB'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5'
    return url


# Closes Tor Browser
def closetor():
    global pid
    os.system("taskkill /pid " + str(pid))
    print('Closing Tor...')
    time.sleep(3)
    return


# Creates a Mechanize browser and initializes its options
def createBrowser():
    br = mechanize.Browser()
    cj = mechanize.CookieJar()
    br.set_cookiejar(cj)

    # Browser options
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'),
                     ('Accept', '*/*')]

    return br


def getAccess():
    url = getFixedURL()
    br = createBrowser()
    try:
        br.open(url)
        return br
    except Exception:
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, "wb") as f:
        f.write(page.read())
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    today = date.today()
    datedDir = "%02d%02d%04d" % (today.month, today.day, today.year)
    if isDescriptionLink(url):
        fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/BestCardingWorld/HTML_Pages/' \
                   + datedDir + '/Description/' + fileName + '.html'
    else:
        fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/BestCardingWorld/HTML_Pages/' \
                   + datedDir + '/Listing/' + fileName + '.html'
    return fullPath


# Creates the name of the file based on the URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


# Hacking and Markets related topics
def getInterestedLinks():
    links = []
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=43&sid=e12864ffccc5df877b03b573534955be')
    return links


# Start crawling Forum pages
def crawlForum(br):
    print("Crawling CryptBB forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            page = br.open(link)
            savePage(page, link)

            res = br.response().read()
            soup = BeautifulSoup(res, 'html.parser')

            # queue the next listing page directly after the current one
            next_link = soup.find("a", {"rel": "next"})
            if next_link is not None:
                full_url = urlparse.urljoin(linksToCrawl[i], next_link['href'])
                linksToCrawl.insert(i + 1, full_url)

            listOfTopics = findDescriptionPages(link)
            for topic in listOfTopics:
                itemPage = br.open(str(topic))
                savePage(itemPage, topic)
        except Exception as e:
            print('Error getting link: ', link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("CryptBB forum done successfully. Press ENTER to continue\n")
    return


# Returns True if the link is a 'Topic' link; may need to change for different websites
def isDescriptionLink(url):
    if 'topic' in url:
        return True
    return False


# Returns True if the link is a listing page link; may need to change for different websites
def isListingLink(url):
    '''
    reg = 'board=[0-9]+.[0-9]+\Z'
    if len(re.findall(reg, url)) == 0:
        return False
    return True
    '''
    if 'forum' in url:
        return True
    return False


# Calls the parser to extract the topic links from a saved listing page
def findDescriptionPages(url):
    soup = ""
    error = False

    try:
        html = codecs.open(
            "C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\BestCardingWorld\\HTML_Pages\\" + str(
                "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
                "%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html", encoding='utf8')
        soup = BeautifulSoup(html, "html.parser")
    except Exception:
        try:
            html = open(
                "C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\BestCardingWorld\\HTML_Pages\\" + str(
                    "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
                    "%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html")
            soup = BeautifulSoup(html, "html.parser")
        except Exception:
            error = True
            print("There was a problem reading the file " + getNameFromURL(url) + " in the listing section.")

    if not error:
        return bestcardingworld_links_parser(soup)
    else:
        return []


def crawler():
    startCrawling()
    print("Crawling and Parsing CryptBB .... DONE!")
@@ -0,0 +1,317 @@
__author__ = 'Helium'
'''
Altenens Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, re, time
from datetime import date
import configparser
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Altenens.parser import altenens_links_parser
from Forums.Utilities.utilities import cleanHTML

config = configparser.ConfigParser()
config.read('../../setup.ini')

counter = 1
baseURL = 'https://altenens.is/'


# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    # forumName = getForumName()

    driver = getAccess()
    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(forumName, False)


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Login using premade account credentials; do the login CAPTCHA manually
def login(driver):
    # click the login button
    login_link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[1]/div/div/div/div[1]/a[1]').\
        get_attribute('href')
    driver.get(login_link)
    # login.click()

    # # entering username and password into input boxes
    # usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[1]/dd')
    # # Username here
    # usernameBox.send_keys('mylittlepony45')  # sends string to the username box
    # passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[2]/dd/div/div')
    # # Password here
    # passwordBox.send_keys('johnnyTest@18')  # sends string to passwordBox

    input("Press ENTER when CAPTCHA is completed\n")

    # Wait for the listing page to show up (this XPath may need to change based on the seed URL):
    # wait up to 50 s for the first topic row to become visible, then continue
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[4]/div/div/div[1]/div/div[1]/div[2]/ol/li[1]')))


# Returns the name of the website
def getForumName():
    name = 'Altenens'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'https://altenens.is/'
    return url


# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close the current tab
    time.sleep(3)
    return


# Creates a FireFox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))
    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
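

# A minimal sanity check for the proxy wiring above (a sketch, not part of the
# crawl flow): with Tor Browser already listening on 127.0.0.1:9150, loading
# the Tor Project's check page should confirm the traffic leaves through Tor.
#
#   driver = createFFDriver()
#   driver.get('https://check.torproject.org')
#   print('Congratulations' in driver.page_source)  # True when routed via Tor
#   driver.quit()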


def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)  # open the seed url in the browser
        return driver
    except Exception:
        driver.close()  # close the tab
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    today = date.today()
    datedDir = "%02d%02d%04d" % (today.month, today.day, today.year)
    if isDescriptionLink(url):
        fullPath = '..\\Altenens\\HTML_Pages\\' + datedDir + '\\Description\\' + fileName + '.html'
    else:
        fullPath = '..\\Altenens\\HTML_Pages\\' + datedDir + '\\Listing\\' + fileName + '.html'
    return fullPath
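
# For example (hypothetical URL; the directory depends on the current date):
# on 2022-06-20, getFullPathName('https://altenens.is/threads/some-topic.12345/')
# returns '..\Altenens\HTML_Pages\06202022\Description\httpsaltenensisthreadssometopic12345.html',
# since 'threads' URLs are description pages and everything else goes under Listing.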


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
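
# For example, getNameFromURL('https://altenens.is/forums/hacking-tools.469165/')
# returns 'httpsaltenensisforumshackingtools469165'; the global counter is only
# used as a fallback name when a URL contains no alphanumeric characters at all.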


def getInterestedLinks():
    links = []

    # Hacking Tools
    links.append('https://altenens.is/forums/hacking-tools.469165/')
    # # hash cracking
    # links.append('https://altenens.is/forums/hash-cracking.469167/')
    # # phishing and spamming
    # links.append('https://altenens.is/forums/phishing-and-spamming.469223/')
    # # pentesting
    # links.append('https://altenens.is/forums/pentesting.469169/')
    # # cracking tools
    # links.append('https://altenens.is/forums/cracking-tools.469204/')
    # # cracking tutorials and other methods
    # links.append('https://altenens.is/forums/cracking-tutorials-other-methods.469205/')

    return links


def crawlForum(driver):
    print("Crawling the Altenens forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)  # open the listing page
            except Exception:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            # loop through the topics
            while has_next_page:
                topics = topicPages(html)  # topic links on the current listing page
                for item in topics:
                    # variable to check if there is a next page for the topic
                    has_next_topic_page = True
                    counter = 1

                    # check if there is a next page for the topic
                    while has_next_topic_page:
                        # try to access the next page of the topic
                        itemURL = urlparse.urljoin(baseURL, str(item))
                        try:
                            driver.get(itemURL)
                        except Exception:
                            driver.refresh()
                        savePage(driver.page_source, item)

                        # if there is a next page in the topic, go there and save it
                        try:
                            item = driver.find_element(By.XPATH, '/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[1]/div[1]/div[1]/nav/div[1]/a').get_attribute('href')

                            if item == "":
                                raise NoSuchElementException
                            else:
                                counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # end of the topic loop: step back to the listing page
                    for j in range(counter):
                        driver.back()

                    # testing break: only crawl the first topic (comment out for a full crawl)
                    break

                # testing break: stop after two listing pages (comment out for a full crawl)
                if count == 1:
                    count = 0
                    break

                try:  # next listing page (the XPath may change depending on the web page)
                    link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/div[1]/div[1]/nav/div[1]/a').get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except Exception:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1
                except NoSuchElementException:
                    has_next_page = False
        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling Altenens forum done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a topic link; may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False


# Returns True if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False


# Calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return altenens_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Altenens .... DONE!")
@@ -0,0 +1,462 @@
1655762162153 geckodriver INFO Listening on 127.0.0.1:50333
1655762166434 mozrunner::runner INFO Running command: "C:\\Users\\CALSysLab\\Desktop\\Tor Browser\\Browser\\firefox.exe" "--marionette" "--remote-debugging-port" "50334" "--remote-allow-hosts" "localhost" "-no-remote" "-profile" "C:\\Users\\CALSYS~1\\AppData\\Local\\Temp\\rust_mozprofileqUeAAN"
console.log: "TorSettings: loadFromPrefs()"
console.log: "TorConnect: Init"
console.log: "TorConnect: observed profile-after-change"
console.log: "TorConnect: observing topic 'TorBootstrapStatus'"
console.log: "TorConnect: observing topic 'TorBootstrapError'"
console.log: "TorConnect: observing topic 'TorProcessExited'"
console.log: "TorConnect: observing topic 'TorLogHasWarnOrErr'"
console.log: "TorConnect: observing topic 'torsettings:ready'"
console.log: "TorSettings: observed profile-after-change"
1655762166904 Marionette INFO Marionette enabled
console.log: "TorConnect: will load after bootstrap => [about:blank]"
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
console.error: "Could not load engine [email protected]: Error: Extension is invalid"
JavaScript error: resource://gre/modules/XPCOMUtils.jsm, line 161: TypeError: Cc[aContract] is undefined
console.error: "Could not load engine [email protected]: Error: Extension is invalid"
DevTools listening on ws://localhost:50334/devtools/browser/fb880205-35da-44a4-83b8-a861ce7125f1
1655762168590 Marionette INFO Listening on port 50341
1655762169104 RemoteAgent WARN TLS certificate errors will be ignored for this session
1655762207624 Marionette INFO Stopped listening on port 50341
JavaScript error: resource:///modules/Interactions.jsm, line 209: NS_ERROR_FAILURE: Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsIUserIdleService.removeIdleObserver]
!!! error running onStopped callback: TypeError: callback is not a function
###!!! [Parent][RunMessage] Error: Channel closing: too late to send/recv, messages will be lost
1655762207923 RemoteAgent ERROR unable to stop listener: [Exception... "Component returned failure code: 0x8000ffff (NS_ERROR_UNEXPECTED) [nsIWindowMediator.getEnumerator]" nsresult: "0x8000ffff (NS_ERROR_UNEXPECTED)" location: "JS frame :: chrome://remote/content/cdp/observers/TargetObserver.jsm :: stop :: line 64" data: no] Stack trace: stop()@TargetObserver.jsm:64
unwatchForTabs()@TargetList.jsm:70
unwatchForTargets()@TargetList.jsm:37
destructor()@TargetList.jsm:109
stop()@CDP.jsm:104
close()@RemoteAgent.jsm:138
1655768229472 geckodriver INFO Listening on 127.0.0.1:50431 | |||
1655768233458 webdriver::server WARN Rejected request with missing Host header |
@ -0,0 +1,264 @@ | |||
__author__ = 'Helium' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from Forums.Utilities.utilities import * | |||
from datetime import datetime
from datetime import date
from datetime import timedelta
import re | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
# This is the method to parse the Description Pages (one page to each topic in the Listing Pages) | |||
def cryptBB_description_parser(soup): | |||
# Fields to be parsed | |||
topic = "-1" # topic name | |||
user = [] # all users of each post | |||
addDate = [] # all dated of each post | |||
feedback = [] # all feedbacks of each vendor (this was found in just one Forum and with a number format) | |||
status = [] # all user's authority in each post such as (adm, member, dangerous) | |||
reputation = [] # all user's karma in each post (usually found as a number) | |||
sign = [] # all user's signature in each post (usually a standard message after the content of the post) | |||
post = [] # all messages of each post | |||
interest = [] # all user's interest in each post | |||
# Finding the topic (should be just one coming from the Listing Page) | |||
li = soup.find("td", {"class": "thead"}).find('strong') | |||
topic = li.text | |||
    topic = re.sub(r"\[\w*\]", '', topic)
topic = topic.replace(",","") | |||
topic = topic.replace("\n","") | |||
topic = cleanString(topic.strip()) | |||
# Finding the repeated tag that corresponds to the listing of posts | |||
# try: | |||
posts = soup.find('table', {"class": "tborder tfixed clear"}).find('td', {"id": "posts_container"}).find_all( | |||
'div', {"class": "post"}) | |||
# For each message (post), get all the fields we are interested to: | |||
for ipost in posts: | |||
# Finding a first level of the HTML page | |||
post_wrapper = ipost.find('span', {"class": "largetext"}) | |||
# Finding the author (user) of the post | |||
author = post_wrapper.text.strip() | |||
user.append(cleanString(author)) # Remember to clean the problematic characters | |||
# Finding the status of the author | |||
smalltext = ipost.find('div', {"class": "post_author"}) | |||
''' | |||
# Testing here two possibilities to find this status and combine them | |||
if ipost.find('div', {"class": "deleted_post_author"}): | |||
status.append(-1) | |||
interest.append(-1) | |||
reputation.append(-1) | |||
addDate.append(-1) | |||
post.append("THIS POST HAS BEEN REMOVED!") | |||
sign.append(-1) | |||
feedback.append(-1) | |||
continue | |||
''' | |||
# CryptBB does have membergroup and postgroup | |||
membergroup = smalltext.find('div', {"class": "profile-rank"}) | |||
postgroup = smalltext.find('div', {"class": "postgroup"}) | |||
if membergroup != None: | |||
membergroup = membergroup.text.strip() | |||
if postgroup != None: | |||
postgroup = postgroup.text.strip() | |||
membergroup = membergroup + " - " + postgroup | |||
else: | |||
if postgroup != None: | |||
membergroup = postgroup.text.strip() | |||
else: | |||
membergroup = "-1" | |||
status.append(cleanString(membergroup)) | |||
        # Finding the interest of the author
        # CryptBB posts do not always carry a blurb; default to "-1" when it is absent
blurb = smalltext.find('li', {"class": "blurb"}) | |||
if blurb != None: | |||
blurb = blurb.text.strip() | |||
else: | |||
blurb = "-1" | |||
interest.append(cleanString(blurb)) | |||
# Finding the reputation of the user | |||
# CryptBB does have reputation | |||
author_stats = smalltext.find('div', {"class": "author_statistics"}) | |||
karma = author_stats.find('strong') | |||
if karma != None: | |||
karma = karma.text | |||
karma = karma.replace("Community Rating: ", "") | |||
karma = karma.replace("Karma: ", "") | |||
karma = karma.strip() | |||
else: | |||
karma = "-1" | |||
reputation.append(cleanString(karma)) | |||
# Getting here another good tag to find the post date, post content and users' signature | |||
postarea = ipost.find('div', {"class": "post_content"}) | |||
dt = postarea.find('span', {"class": "post_date"}).text | |||
# dt = dt.strip().split() | |||
dt = dt.strip() | |||
day=date.today() | |||
if "Yesterday" in dt: | |||
yesterday = day - timedelta(days=1) | |||
yesterday = yesterday.strftime('%m-%d-%Y') | |||
stime = dt.replace('Yesterday,','').strip() | |||
date_time_obj = yesterday+ ', '+stime | |||
date_time_obj = datetime.strptime(date_time_obj,'%m-%d-%Y, %I:%M %p') | |||
elif "hours ago" in dt: | |||
day = day.strftime('%m-%d-%Y') | |||
date_time_obj = postarea.find('span', {"class": "post_date"}).find('span')['title'] | |||
date_time_obj = datetime.strptime(date_time_obj, '%m-%d-%Y, %I:%M %p') | |||
else: | |||
date_time_obj = datetime.strptime(dt, '%m-%d-%Y, %I:%M %p') | |||
        sdate = date_time_obj.strftime('%b %d, %Y')
        stime = date_time_obj.strftime('%I:%M %p')
addDate.append(date_time_obj) | |||
# Finding the post | |||
inner = postarea.find('div', {"class": "post_body scaleimages"}) | |||
inner = inner.text.strip() | |||
post.append(cleanString(inner)) | |||
# Finding the user's signature | |||
# signature = ipost.find('div', {"class": "post_wrapper"}).find('div', {"class": "moderatorbar"}).find('div', {"class": "signature"}) | |||
signature = ipost.find('div', {"class": "signature scaleimages"}) | |||
if signature != None: | |||
signature = signature.text.strip() | |||
# print(signature) | |||
else: | |||
signature = "-1" | |||
sign.append(cleanString(signature)) | |||
# As no information about user's feedback was found, just assign "-1" to the variable | |||
feedback.append("-1") | |||
''' | |||
except: | |||
if soup.find('td', {"class": "trow1"}).text == " You do not have permission to access this page. ": | |||
user.append("-1") | |||
status.append(-1) | |||
interest.append(-1) | |||
reputation.append(-1) | |||
addDate.append(-1) | |||
post.append("NO ACCESS TO THIS PAGE!") | |||
sign.append(-1) | |||
feedback.append(-1) | |||
''' | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
row = (topic, user, status, reputation, interest, sign, post, feedback, addDate) | |||
# Sending the results | |||
return row | |||
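# A minimal, standalone sketch of the timestamp handling above, assuming CryptBB renders post
# dates either absolutely ('MM-DD-YYYY, HH:MM AM/PM'), as 'Yesterday, HH:MM PM', or as an
# 'X hours ago' label whose absolute time sits in a nested span's title attribute. The sample
# strings in the comments are illustrative only; the parser itself does not call this helper.
def normalize_post_date(dt, title_attr=None):
    from datetime import datetime, date, timedelta
    dt = dt.strip()
    if "Yesterday" in dt:
        # rebuild an absolute date from today's date minus one day
        yesterday = (date.today() - timedelta(days=1)).strftime('%m-%d-%Y')
        stime = dt.replace('Yesterday,', '').strip()
        return datetime.strptime(yesterday + ', ' + stime, '%m-%d-%Y, %I:%M %p')
    if "hours ago" in dt and title_attr is not None:
        # relative label: fall back to the absolute timestamp carried by the title attribute
        return datetime.strptime(title_attr, '%m-%d-%Y, %I:%M %p')
    return datetime.strptime(dt, '%m-%d-%Y, %I:%M %p')
# e.g. normalize_post_date('06-17-2022, 11:20 AM') -> datetime(2022, 6, 17, 11, 20)
#      normalize_post_date('Yesterday, 08:45 PM') -> yesterday's date at 20:45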
# This is the method to parse the Listing Pages (one page with many posts) | |||
def cryptBB_listing_parser(soup): | |||
board = "-1" # board name (the previous level of the topic in the Forum categorization tree. | |||
# For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware) | |||
nm = 0 # this variable should receive the number of topics | |||
topic = [] # all topics | |||
author = [] # all authors of each topic | |||
views = [] # number of views of each topic | |||
posts = [] # number of posts of each topic | |||
addDate = [] # when the topic was created (difficult to find) | |||
    href = [] # this variable should receive all cleaned urls (we will use this to do the merge between
# Listing and Description pages) | |||
# Finding the board (should be just one) | |||
board = soup.find('span', {"class": "active"}).text | |||
board = cleanString(board.strip()) | |||
# Finding the repeated tag that corresponds to the listing of topics | |||
itopics = soup.find_all('tr',{"class": "inline_row"}) | |||
for itopic in itopics: | |||
        # For each topic found, the structure used to get the rest of the information can be of two types.
        # Testing both so that no topic is missed
# Adding the topic to the topic list | |||
try: | |||
topics = itopic.find('span', {"class": "subject_old"}).find('a').text | |||
except: | |||
topics = itopic.find('span', {"class": "subject_new"}).find('a').text | |||
        topics = re.sub(r"\[\w*\]", '', topics)
topic.append(cleanString(topics)) | |||
# Counting how many topics we have found so far | |||
nm = len(topic) | |||
# Adding the url to the list of urls | |||
try: | |||
link = itopic.find('span', {"class": "subject_old"}).find('a').get('href') | |||
except: | |||
link = itopic.find('span',{"class": "subject_new"}).find('a').get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the author of the topic | |||
ps = itopic.find('div', {"class":"author smalltext"}).find('a').text | |||
user = ps.strip() | |||
author.append(cleanString(user)) | |||
# Finding the number of replies | |||
columns = itopic.findChildren('td',recursive=False) | |||
replies = columns[3].text | |||
posts.append(cleanString(replies)) | |||
# Finding the number of Views | |||
tview = columns[4].text | |||
views.append(cleanString(tview)) | |||
# If no information about when the topic was added, just assign "-1" to the variable | |||
addDate.append("-1") | |||
return organizeTopics("CryptBB", nm, topic, board, author, views, posts, href, addDate) | |||
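# A minimal usage sketch for the listing parser (the path below is hypothetical and mirrors the
# layout the crawler uses when saving pages); defined but never called, illustration only.
def _example_parse_saved_listing(path='CryptBB/HTML_Pages/06172022/Listing/example.html'):
    with open(path, encoding='utf8') as f:
        soup = BeautifulSoup(f, 'html.parser')
    return cryptBB_listing_parser(soup)  # row produced by organizeTopics(...)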
def altenens_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.find('div', {"class": "structItemContainer-group js-threadList"}).find_all('div', {"class": "structItem structItem--thread is-unread js-inlineModContainer js-threadListItem-1843963"}) | |||
for a in listing: | |||
link = a.find('div', {"class": "structItem-title"}).find('a').get('href') | |||
href.append(link) | |||
return href |
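# Sanity-check sketch for the selector above, using a minimal XenForo-like snippet; the class
# names mirror the tokens the parser matches and are illustrative, not copied from a live page.
def _example_altenens_links():
    snippet = ('<div class="structItemContainer-group js-threadList">'
               '<div class="structItem structItem--thread js-threadListItem-12345">'
               '<div class="structItem-title"><a href="/threads/example.12345/">Example</a></div>'
               '</div></div>')
    return altenens_links_parser(BeautifulSoup(snippet, 'html.parser'))  # ['/threads/example.12345/']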
@ -1,257 +0,0 @@ | |||
__author__ = '91Shadows' | |||
''' | |||
OnniForums Crawler (Mechanize) | |||
''' | |||
import codecs, os, re | |||
import socks, socket, time | |||
from datetime import date | |||
import urllib.parse as urlparse | |||
import http.client as httplib | |||
import mechanize | |||
import subprocess | |||
from bs4 import BeautifulSoup | |||
from Forums.Initialization.prepare_parser import new_parse | |||
from Forums.OnniForums.parser import onniForums_listing_parser | |||
counter = 1 | |||
httplib.HTTPConnection._http_vsn = 10 | |||
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0' | |||
baseURL = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5' | |||
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150) | |||
# Opens Tor Browser, crawls the website | |||
def startCrawling(): | |||
opentor() | |||
getUrl() | |||
forumName = getForumName() | |||
br = getAccess() | |||
if br != 'down': | |||
crawlForum(br) | |||
new_parse(forumName, False) | |||
# new_parse(forumName, False) | |||
closetor() | |||
# Opens Tor Browser | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
path = open('../../path.txt').readline() | |||
pro = subprocess.Popen(path) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input("Tor Connected. Press ENTER to continue\n") | |||
return | |||
# Creates a connection through Tor Port | |||
def getUrl(timeout=None): | |||
socket.socket = socks.socksocket | |||
socket.create_connection = create_connection | |||
return | |||
# Makes the onion address request | |||
def create_connection(address, timeout=None, source_address=None): | |||
sock = socks.socksocket() | |||
sock.connect(address) | |||
return sock | |||
# Returns the name of website | |||
def getForumName(): | |||
name = 'CrackingPro' | |||
return name | |||
# Return the link of website | |||
def getFixedURL(): | |||
url = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5' | |||
return url | |||
# Closes Tor Browser | |||
def closetor(): | |||
global pid | |||
os.system("taskkill /pid " + str(pid)) | |||
print('Closing Tor...') | |||
time.sleep(3) | |||
return | |||
# Creates a Mechanize browser and initializes its options | |||
def createBrowser(): | |||
br = mechanize.Browser() | |||
cj = mechanize.CookieJar() | |||
br.set_cookiejar(cj) | |||
# Browser options | |||
br.set_handle_equiv(True) | |||
br.set_handle_redirect(True) | |||
br.set_handle_referer(True) | |||
br.set_handle_robots(False) | |||
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) | |||
br.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'), | |||
('Accept', '*/*')] | |||
return br | |||
def getAccess(): | |||
url = getFixedURL() | |||
br = createBrowser() | |||
try: | |||
br.open(url) | |||
return br | |||
except: | |||
return 'down' | |||
# Saves the crawled html page | |||
def savePage(page, url): | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
a = page.read() | |||
open(filePath, "wb").write(a) | |||
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
def getFullPathName(url): | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/CrackingPro/HTML_Pages/' + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + '/' + 'Description/' + fileName + '.html' | |||
else: | |||
fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/CrackingPro/HTML_Pages/' + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + '/' + 'Listing/' + fileName + '.html' | |||
return fullPath | |||
# Creates the name of the file based on URL | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
# Hacking and Markets related topics | |||
def getInterestedLinks(): | |||
links = [] | |||
links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=43&sid=e12864ffccc5df877b03b573534955be') | |||
return links | |||
# Start crawling Forum pages | |||
def crawlForum(br): | |||
print("Crawling CrackingPro forum") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
page = br.open(link)#open url | |||
savePage(page, link) | |||
res = br.response().read() | |||
soup = BeautifulSoup(res, 'html.parser') | |||
next_link = soup.find("a", {"rel": "next"}) | |||
if next_link != None: | |||
full_url = urlparse.urljoin(linksToCrawl[i], next_link['href']) | |||
linksToCrawl.insert(i + 1, full_url) | |||
listOfTopics = findDescriptionPages(link) | |||
for topic in listOfTopics: | |||
itemPage = br.open(str(topic)) | |||
savePage(itemPage, topic) | |||
except Exception as e: | |||
print('Error getting link: ', link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("Crawling CrackingPro forum done sucessfully. Press ENTER to continue\n") | |||
return | |||
# Returns True if the link is 'Topic' Links | |||
def isDescriptionLink(url): | |||
if 'Topic' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
def isListingLink(url): | |||
''' | |||
reg = 'board=[0-9]+.[0-9]+\Z' | |||
if len(re.findall(reg, url)) == 0: | |||
return False | |||
return True | |||
''' | |||
if 'Forum' in url: | |||
return True | |||
return False | |||
# calling the parser to define the links | |||
def findDescriptionPages(url): | |||
soup = "" | |||
error = False | |||
try: | |||
html = codecs.open( | |||
"C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\CrackingPro\\HTML_Pages\\" + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html", encoding='utf8') | |||
soup = BeautifulSoup(html, "html.parser") | |||
except: | |||
try: | |||
html = open( | |||
"C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\CrackingPro\\HTML_Pages\\" + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html") | |||
soup = BeautifulSoup(html, "html.parser") | |||
except: | |||
error = True | |||
print("There was a problem to read the file " + getNameFromURL(url) + " in the listing section.") | |||
if not error: | |||
return onniForums_listing_parser(soup) | |||
else: | |||
return [] | |||
def crawler(): | |||
startCrawling() | |||
print("Crawling and Parsing CrackingPro .... DONE!") |
@ -1 +1 @@ | |||
OnniForums | |||
OnniForums |
@ -0,0 +1,314 @@ | |||
__author__ = 'Helium' | |||
''' | |||
BlackPyramid Market Crawler (Selenium)
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.BlackPyramid.parser import blackpyramid_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
config = configparser.ConfigParser() | |||
config.read('../../setup.ini') | |||
counter = 1 | |||
baseURL = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/login/' | |||
# Opens Tor Browser, crawls the website, then parses, then closes tor | |||
#acts like the main method for the crawler, another function at the end of this code calls this function later | |||
def startCrawling(): | |||
opentor() | |||
# mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
# new_parse(forumName, baseURL, False) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path')) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
name = 'BlackPyramid' | |||
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates FireFox 'driver' and configure its 'Profile' | |||
# to use Tor proxy and socket | |||
def createFFDriver(): | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 2) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
return driver | |||
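# The profile above assumes Tor Browser's SOCKS proxy is already listening on 127.0.0.1:9150
# (a standalone tor daemon defaults to 9050 instead). A preflight check like this sketch can
# fail fast before Selenium starts; it is illustrative and not called by the crawler.
def tor_socks_reachable(host='127.0.0.1', port=9150, timeout=3):
    import socket
    try:
        # True if something is accepting connections on the SOCKS port
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False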
#the driver 'gets' the url, attempting to get on the site, if it can't access return 'down' | |||
#return: return the selenium driver or string 'down' | |||
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# Handles login: waits for the login page to load, fills in the username and password boxes,
# then pauses so the CAPTCHA can be solved manually in the browser before continuing
#@param: current selenium web driver | |||
def login(driver): | |||
# wait for login page | |||
login_link = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[3]/div/main/div/div/div/div[2]/div/div/div/section[1]/input[1]') | |||
login_link.click() # open tab with url | |||
# entering username and password into input boxes | |||
usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]') | |||
# Username here | |||
usernameBox.send_keys('ChipotleSteakBurrito') | |||
passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]') | |||
# Password here | |||
passwordBox.send_keys('BlackBeans') | |||
input("Press ENTER when CAPTCHA is completed\n") | |||
# wait for listing page show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, '/html/body/div[2]/form/nav/nav/ul/li[2]/div/a/span[1]'))) | |||
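# NOTE: the credentials above are tied to one specific account and the XPaths to the current page
# layout; both will need updating if the account or the site's markup changes.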
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(page, url): | |||
cleanPage = cleanHTML(page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
open(filePath, 'wb').write(cleanPage.encode('utf-8')) | |||
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html' | |||
else: | |||
fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html' | |||
return fullPath | |||
# Creates the file name from passed URL, gives distinct name if can't be made unique after cleaned | |||
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
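# For illustration (the sample URL is made up), the cleaning above maps
#   'http://example.onion/category/16' -> 'httpexampleonioncategory16'
# so every saved page gets a filesystem-safe name derived from its URL.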
# returns list of urls, here is where you can list the different urls of interest, the crawler runs through this list | |||
#in this example, there are several categories the products fall under, such as
# Hacking Guides, Exploits, botnets/malware, fraud software, other tools, and services
#as you can see they are categories of products | |||
def getInterestedLinks(): | |||
links = [] | |||
# Hacking Guides | |||
links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/') | |||
# # Exploits | |||
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/') | |||
# # botnets/malware | |||
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/') | |||
# # fraud software | |||
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/') | |||
# # Other Tools | |||
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/') | |||
# # Services | |||
# links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/') | |||
return links | |||
# iterates through the links of interest; each link is opened and crawled through
#listing and description pages are crawled through here, where both types of pages are saved
#@param: selenium driver | |||
def crawlForum(driver): | |||
print("Crawling the BlackPyramid market") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
count = 0 | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
try: | |||
clicker = driver.find_element(by=By.XPATH, value='/html/body/div[2]/form/nav/nav/ul/li[2]/div/a') | |||
clicker.click() # open tab with url | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
has_next_page = True | |||
while has_next_page: | |||
                productList = productPages(html)
                for item in productList:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver.page_source, item) | |||
driver.back() | |||
# comment out | |||
break | |||
# comment out | |||
if count == 1: | |||
count = 0 | |||
break | |||
try: | |||
clicker = driver.find_element(by=By.XPATH, value= | |||
'/html/body/center/div[4]/div/div[3]/div[23]/div[2]/input[1]') | |||
if clicker == "": | |||
raise NoSuchElementException | |||
try: | |||
clicker.click() | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("Crawling BlackPyramid forum done sucessfully. Press ENTER to continue\n") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url): | |||
if 'products' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url): | |||
if 'search' in url: | |||
return True | |||
return False | |||
# calls the links parser to extract description-page links from a listing page
#@param: html page source of a listing page reached from getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return blackpyramid_links_parser(soup) | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BlackPyramid .... DONE!") |
@ -0,0 +1,291 @@ | |||
__author__ = 'Helium' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
#parses description pages, so takes html pages of description pages using soup object, and parses it for info it needs | |||
#stores info it needs in different lists, these lists are returned after being organized | |||
#@param: soup object looking at html page of description page | |||
#return: 'row' that contains a variety of lists that each hold info on the description page | |||
def darkfox_description_parser(soup): | |||
# Fields to be parsed | |||
name = "-1" # 0 Product_Name | |||
describe = "-1" # 1 Product_Description | |||
lastSeen = "-1" # 2 Product_LastViewDate | |||
rules = "-1" # 3 NOT USED ... | |||
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 5 Product_MS_Classification (Microsoft Security) | |||
review = "-1" # 6 Product_Number_Of_Reviews | |||
category = "-1" # 7 Product_Category | |||
shipFrom = "-1" # 8 Product_ShippedFrom | |||
shipTo = "-1" # 9 Product_ShippedTo | |||
left = "-1" # 10 Product_QuantityLeft | |||
escrow = "-1" # 11 Vendor_Warranty | |||
terms = "-1" # 12 Vendor_TermsAndConditions | |||
vendor = "-1" # 13 Vendor_Name | |||
sold = "-1" # 14 Product_QuantitySold | |||
addDate = "-1" # 15 Product_AddedDate | |||
available = "-1" # 16 NOT USED ... | |||
endDate = "-1" # 17 NOT USED ... | |||
BTC = "-1" # 18 Product_BTC_SellingPrice | |||
USD = "-1" # 19 Product_USD_SellingPrice | |||
rating = "-1" # 20 Vendor_Rating | |||
success = "-1" # 21 Vendor_Successful_Transactions | |||
EURO = "-1" # 22 Product_EURO_SellingPrice | |||
# Finding Product Name | |||
name = soup.find('h1').text | |||
name = name.replace('\n', ' ') | |||
name = name.replace(",", "") | |||
name = name.strip() | |||
# Finding Vendor | |||
vendor = soup.find('h3').find('a').text.strip() | |||
# Finding Vendor Rating | |||
rating = soup.find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Successful Transactions | |||
success = soup.find('h3').text | |||
success = success.replace("Vendor: ", "") | |||
success = success.replace(vendor, "") | |||
success = success.replace("(", "") | |||
success = success.replace(")", "") | |||
success = success.strip() | |||
bae = soup.find('div', {'class': "box"}).find_all('ul') | |||
# Finding Prices | |||
USD = bae[1].find('strong').text.strip() | |||
li = bae[2].find_all('li') | |||
# Finding Escrow | |||
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Category | |||
category = li[1].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Quantity Available | |||
left = li[3].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Number Sold | |||
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() | |||
li = bae[3].find_all('li') | |||
# Finding Shipment Information (Origin) | |||
if "Ships from:" in li[-2].text: | |||
shipFrom = li[-2].text | |||
shipFrom = shipFrom.replace("Ships from: ", "") | |||
# shipFrom = shipFrom.replace(",", "") | |||
shipFrom = shipFrom.strip() | |||
# Finding Shipment Information (Destination) | |||
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text | |||
shipTo = shipTo.replace("Ships to: ", "") | |||
shipTo = shipTo.strip() | |||
if "certain countries" in shipTo: | |||
countries = "" | |||
tags = li[-1].find_all('span', {'class': "tag"}) | |||
for tag in tags: | |||
country = tag.text.strip() | |||
countries += country + ", " | |||
shipTo = countries.strip(", ") | |||
# Finding the Product description | |||
describe = soup.find('div', {'class': "pre-line"}).text | |||
describe = describe.replace("\n", " ") | |||
describe = describe.strip() | |||
'''# Finding the Number of Product Reviews | |||
tag = soup.findAll(text=re.compile('Reviews')) | |||
for index in tag: | |||
reviews = index | |||
par = reviews.find('(') | |||
if par >=0: | |||
reviews = reviews.replace("Reviews (","") | |||
reviews = reviews.replace(")","") | |||
reviews = reviews.split(",") | |||
review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) | |||
else : | |||
review = "-1"''' | |||
# Searching for CVE and MS categories | |||
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if cve: | |||
CVE = " " | |||
for idx in cve: | |||
CVE += (idx) | |||
CVE += " " | |||
CVE = CVE.replace(',', ' ') | |||
CVE = CVE.replace('\n', '') | |||
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if ms: | |||
MS = " " | |||
for im in ms: | |||
MS += (im) | |||
MS += " " | |||
MS = MS.replace(',', ' ') | |||
MS = MS.replace('\n', '') | |||
# Populating the final variable (this should be a list with all fields scraped) | |||
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, | |||
sold, addDate, available, endDate, BTC, USD, rating, success, EURO) | |||
# Sending the results | |||
return row | |||
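# The CVE/MS scan above can be exercised standalone. Note that post-2014 CVE identifiers may carry
# five or more digits in the sequence number, so r'CVE-\d{4}-\d{4,}' is the more complete pattern;
# the identifiers below are real, the sentence is made up.
def _example_cve_ms_scan():
    import re
    text = 'Exploit bundle covering CVE-2017-0144 and MS17-010.'
    cves = re.findall(r'CVE-\d{4}-\d{4,}', text)  # ['CVE-2017-0144']
    mss = re.findall(r'MS\d{2}-\d{3}', text)      # ['MS17-010']
    return cves, mss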
#parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs | |||
#stores info it needs in different lists, these lists are returned after being organized | |||
#@param: soup object looking at html page of listing page | |||
#return: 'row' that contains a variety of lists that each hold info on the listing page | |||
def darkfox_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # Total_Products (Should be Integer) | |||
mktName = "DarkFox" # 0 Marketplace_Name | |||
name = [] # 1 Product_Name | |||
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = [] # 3 Product_MS_Classification (Microsoft Security) | |||
category = [] # 4 Product_Category | |||
describe = [] # 5 Product_Description | |||
escrow = [] # 6 Vendor_Warranty | |||
views = [] # 7 Product_Number_Of_Views | |||
reviews = [] # 8 Product_Number_Of_Reviews | |||
addDate = [] # 9 Product_AddDate | |||
lastSeen = [] # 10 Product_LastViewDate | |||
BTC = [] # 11 Product_BTC_SellingPrice | |||
USD = [] # 12 Product_USD_SellingPrice | |||
EURO = [] # 13 Product_EURO_SellingPrice | |||
sold = [] # 14 Product_QuantitySold | |||
qLeft =[] # 15 Product_QuantityLeft | |||
shipFrom = [] # 16 Product_ShippedFrom | |||
shipTo = [] # 17 Product_ShippedTo | |||
vendor = [] # 18 Vendor | |||
rating = [] # 19 Vendor_Rating | |||
success = [] # 20 Vendor_Successful_Transactions | |||
href = [] # 23 Product_Links (Urls) | |||
listing = soup.findAll('div', {"class": "card"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
bae = a.findAll('a', href=True) | |||
# Adding the url to the list of urls | |||
link = bae[0].get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product | |||
product = bae[1].find('p').text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div') | |||
if len(bae) >= 5: | |||
# Finding Prices | |||
price = bae[0].text | |||
ud = price.replace(" USD", " ") | |||
# u = ud.replace("$","") | |||
u = ud.replace(",", "") | |||
u = u.strip() | |||
USD.append(u) | |||
# bc = (prc[1]).strip(' BTC') | |||
# BTC.append(bc) | |||
# Finding the Vendor | |||
vendor_name = bae[1].find('a').text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
# Finding the Category | |||
cat = bae[2].find('small').text | |||
cat = cat.replace("Category: ", "") | |||
cat = cat.replace(",", "") | |||
cat = cat.strip() | |||
category.append(cat) | |||
# Finding Number Sold and Quantity Left | |||
num = bae[3].text | |||
num = num.replace("Sold: ", "") | |||
num = num.strip() | |||
sold.append(num) | |||
quant = bae[4].find('small').text | |||
quant = quant.replace("In stock: ", "") | |||
quant = quant.strip() | |||
qLeft.append(quant) | |||
# Finding Successful Transactions | |||
freq = bae[1].text | |||
freq = freq.replace(vendor_name, "") | |||
freq = re.sub(r'Vendor Level \d+', "", freq) | |||
freq = freq.replace("(", "") | |||
freq = freq.replace(")", "") | |||
freq = freq.strip() | |||
success.append(freq) | |||
# Searching for CVE and MS categories | |||
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if not cve: | |||
cveValue="-1" | |||
else: | |||
cee = " " | |||
for idx in cve: | |||
cee += (idx) | |||
cee += " " | |||
cee = cee.replace(',', ' ') | |||
cee = cee.replace('\n', '') | |||
cveValue=cee | |||
CVE.append(cveValue) | |||
        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if not ms: | |||
MSValue="-1" | |||
else: | |||
me = " " | |||
for im in ms: | |||
me += (im) | |||
me += " " | |||
me = me.replace(',', ' ') | |||
me = me.replace('\n', '') | |||
MSValue=me | |||
MS.append(MSValue) | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen, | |||
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href) | |||
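# A minimal usage sketch for the listing parser (hypothetical saved-page path, mirroring the
# crawler's output layout); defined but never called, illustration only.
def _example_parse_saved_market_listing(path=r'..\BlackPyramid\HTML_Pages\06172022\Listing\example.html'):
    with open(path, encoding='utf-8') as f:
        soup = BeautifulSoup(f, 'html.parser')
    return darkfox_listing_parser(soup)  # row produced by organizeProducts(...)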
#called by the crawler to get description links on a listing page | |||
#@param: beautifulsoup object that is using the correct html page (listing page) | |||
#return: list of description links from a listing page | |||
def blackpyramid_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.findAll('article', {"class": "product"}) | |||
for div in listing: | |||
link = div.find('a', {"class": "ah39063"})['href'] | |||
href.append(link) | |||
return href |
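# Sanity-check sketch for the selector above with a minimal inline snippet; the class names mirror
# the ones the parser matches and are illustrative.
def _example_blackpyramid_links():
    snippet = ('<article class="product">'
               '<a class="ah39063" href="/products/12345">Example product</a>'
               '</article>')
    return blackpyramid_links_parser(BeautifulSoup(snippet, 'html.parser'))  # ['/products/12345']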
@ -1 +1 @@ | |||
Tor2door | |||
MikesGrandStore |
@ -0,0 +1,310 @@ | |||
__author__ = 'Helium' | |||
''' | |||
LionMarketplace Market Crawler (Selenium)
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.LionMarketplace.parser import lionmarketplace_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
config = configparser.ConfigParser() | |||
config.read('../../setup.ini') | |||
counter = 1 | |||
baseURL = 'http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/' | |||
# Opens Tor Browser, crawls the website, then parses, then closes tor | |||
#acts like the main method for the crawler, another function at the end of this code calls this function later | |||
def startCrawling(): | |||
opentor() | |||
# mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
# new_parse(forumName, baseURL, False) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path')) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
name = 'LionMarketplace' | |||
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates FireFox 'driver' and configure its 'Profile' | |||
# to use Tor proxy and socket | |||
def createFFDriver(): | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 2) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
return driver | |||
#the driver 'gets' the url, attempting to get on the site, if it can't access return 'down' | |||
#return: return the selenium driver or string 'down' | |||
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# LionMarketplace needs no login or captcha; this only waits for a specific element so that the whole page loads
#@param: current selenium web driver | |||
def login(driver): | |||
# wait for page to show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, "/html/body/div[2]/div[2]/div[2]/div[1]/div/div[2]/div"))) | |||
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(page, url): | |||
cleanPage = cleanHTML(page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
open(filePath, 'wb').write(cleanPage.encode('utf-8')) | |||
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = r'..\LionMarketplace\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html' | |||
else: | |||
fullPath = r'..\LionMarketplace\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html' | |||
return fullPath | |||
# Creates the file name from passed URL, gives distinct name if can't be made unique after cleaned | |||
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
# returns list of urls, here is where you can list the different urls of interest, the crawler runs through this list | |||
#in this example, there are several categories the products fall under, such as
# Software/Malware, Carding, Hacker for hire, Phishing, Ransomware, and Exploits
#as you can see they are categories of products | |||
def getInterestedLinks(): | |||
links = [] | |||
# Software/Malware | |||
links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/16') | |||
# # Carding | |||
# links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/20') | |||
# # Hacker for hire | |||
# links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/0b19f3a0-c7e8-11ec-997b-0dcb6b05ce1d') | |||
# # Phishing | |||
# links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/18098bb0-c7e8-11ec-95e9-45b5e8898cbd') | |||
# # Ransomware | |||
# links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/ce72cee0-c7e7-11ec-a86b-c1ff2d3b2020') | |||
# # Exploits | |||
# links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/e26387c0-c7e7-11ec-a708-ab6dc5117763') | |||
# # Spamming and Anti-Captcha | |||
# links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/f08a9380-c7e7-11ec-918c-ffef7c670c97') | |||
# hacked accounts | |||
#links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/fd47b4a0-c7e7-11ec-937b-61246c4b12b3') | |||
return links | |||
# iterates through the links of interest; each link is opened and crawled through
#listing and description pages are crawled through here, where both types of pages are saved
#@param: selenium driver | |||
def crawlForum(driver): | |||
print("Crawling the LionMarketplace market") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
count = 0 | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
has_next_page = True | |||
while has_next_page: | |||
                productList = productPages(html)
                for item in productList:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver.page_source, item) | |||
driver.back() | |||
# comment out | |||
break | |||
# comment out | |||
if count == 1: | |||
count = 0 | |||
break | |||
try: | |||
link = driver.find_element(by=By.XPATH, value= | |||
'/html/body/div[2]/div[2]/div/div[2]/nav/ul/li[5]/a').get_attribute('href') | |||
if link == "": | |||
raise NoSuchElementException | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("Crawling LionMarketplace forum done sucessfully. Press ENTER to continue\n") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url): | |||
if 'product' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url): | |||
if 'category' in url: | |||
return True | |||
return False | |||
# calls the links parser to extract description-page links from a listing page
#@param: html page source of a listing page reached from getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return lionmarketplace_links_parser(soup) | |||
# Drop links that "signout" | |||
# def isSignOut(url): | |||
# #absURL = urlparse.urljoin(url.base_url, url.url) | |||
# if 'signout' in url.lower() or 'logout' in url.lower(): | |||
# return True | |||
# | |||
# return False | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BestCardingWorld .... DONE!") |
@ -0,0 +1,291 @@ | |||
__author__ = 'Helium' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
#parses description pages, so takes html pages of description pages using soup object, and parses it for info it needs | |||
#stores info it needs in different lists, these lists are returned after being organized | |||
#@param: soup object looking at html page of description page | |||
#return: 'row' that contains a variety of lists that each hold info on the description page | |||
def darkfox_description_parser(soup): | |||
# Fields to be parsed | |||
name = "-1" # 0 Product_Name | |||
describe = "-1" # 1 Product_Description | |||
lastSeen = "-1" # 2 Product_LastViewDate | |||
rules = "-1" # 3 NOT USED ... | |||
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 5 Product_MS_Classification (Microsoft Security) | |||
review = "-1" # 6 Product_Number_Of_Reviews | |||
category = "-1" # 7 Product_Category | |||
shipFrom = "-1" # 8 Product_ShippedFrom | |||
shipTo = "-1" # 9 Product_ShippedTo | |||
left = "-1" # 10 Product_QuantityLeft | |||
escrow = "-1" # 11 Vendor_Warranty | |||
terms = "-1" # 12 Vendor_TermsAndConditions | |||
vendor = "-1" # 13 Vendor_Name | |||
sold = "-1" # 14 Product_QuantitySold | |||
addDate = "-1" # 15 Product_AddedDate | |||
available = "-1" # 16 NOT USED ... | |||
endDate = "-1" # 17 NOT USED ... | |||
BTC = "-1" # 18 Product_BTC_SellingPrice | |||
USD = "-1" # 19 Product_USD_SellingPrice | |||
rating = "-1" # 20 Vendor_Rating | |||
success = "-1" # 21 Vendor_Successful_Transactions | |||
EURO = "-1" # 22 Product_EURO_SellingPrice | |||
# Finding Product Name | |||
name = soup.find('h1').text | |||
name = name.replace('\n', ' ') | |||
name = name.replace(",", "") | |||
name = name.strip() | |||
# Finding Vendor | |||
vendor = soup.find('h3').find('a').text.strip() | |||
# Finding Vendor Rating | |||
rating = soup.find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Successful Transactions | |||
success = soup.find('h3').text | |||
success = success.replace("Vendor: ", "") | |||
success = success.replace(vendor, "") | |||
success = success.replace("(", "") | |||
success = success.replace(")", "") | |||
success = success.strip() | |||
bae = soup.find('div', {'class': "box"}).find_all('ul') | |||
# Finding Prices | |||
USD = bae[1].find('strong').text.strip() | |||
li = bae[2].find_all('li') | |||
# Finding Escrow | |||
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Category | |||
category = li[1].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Quantity Available | |||
left = li[3].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Number Sold | |||
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() | |||
li = bae[3].find_all('li') | |||
# Finding Shipment Information (Origin) | |||
if "Ships from:" in li[-2].text: | |||
shipFrom = li[-2].text | |||
shipFrom = shipFrom.replace("Ships from: ", "") | |||
# shipFrom = shipFrom.replace(",", "") | |||
shipFrom = shipFrom.strip() | |||
# Finding Shipment Information (Destination) | |||
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text | |||
shipTo = shipTo.replace("Ships to: ", "") | |||
shipTo = shipTo.strip() | |||
if "certain countries" in shipTo: | |||
countries = "" | |||
tags = li[-1].find_all('span', {'class': "tag"}) | |||
for tag in tags: | |||
country = tag.text.strip() | |||
countries += country + ", " | |||
shipTo = countries.strip(", ") | |||
# Finding the Product description | |||
describe = soup.find('div', {'class': "pre-line"}).text | |||
describe = describe.replace("\n", " ") | |||
describe = describe.strip() | |||
'''# Finding the Number of Product Reviews | |||
tag = soup.findAll(text=re.compile('Reviews')) | |||
for index in tag: | |||
reviews = index | |||
par = reviews.find('(') | |||
if par >=0: | |||
reviews = reviews.replace("Reviews (","") | |||
reviews = reviews.replace(")","") | |||
reviews = reviews.split(",") | |||
review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) | |||
else : | |||
review = "-1"''' | |||
# Searching for CVE and MS categories | |||
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if cve: | |||
CVE = " " | |||
for idx in cve: | |||
CVE += (idx) | |||
CVE += " " | |||
CVE = CVE.replace(',', ' ') | |||
CVE = CVE.replace('\n', '') | |||
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if ms: | |||
MS = " " | |||
for im in ms: | |||
MS += (im) | |||
MS += " " | |||
MS = MS.replace(',', ' ') | |||
MS = MS.replace('\n', '') | |||
# Populating the final variable (this should be a list with all fields scraped) | |||
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, | |||
sold, addDate, available, endDate, BTC, USD, rating, success, EURO) | |||
# Sending the results | |||
return row | |||
#parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs | |||
#stores info it needs in different lists, these lists are returned after being organized | |||
#@param: soup object looking at html page of listing page | |||
#return: 'row' that contains a variety of lists that each hold info on the listing page | |||
def darkfox_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # Total_Products (Should be Integer) | |||
mktName = "DarkFox" # 0 Marketplace_Name | |||
name = [] # 1 Product_Name | |||
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = [] # 3 Product_MS_Classification (Microsoft Security) | |||
category = [] # 4 Product_Category | |||
describe = [] # 5 Product_Description | |||
escrow = [] # 6 Vendor_Warranty | |||
views = [] # 7 Product_Number_Of_Views | |||
reviews = [] # 8 Product_Number_Of_Reviews | |||
addDate = [] # 9 Product_AddDate | |||
lastSeen = [] # 10 Product_LastViewDate | |||
BTC = [] # 11 Product_BTC_SellingPrice | |||
USD = [] # 12 Product_USD_SellingPrice | |||
EURO = [] # 13 Product_EURO_SellingPrice | |||
sold = [] # 14 Product_QuantitySold | |||
    qLeft = []  # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom | |||
shipTo = [] # 17 Product_ShippedTo | |||
vendor = [] # 18 Vendor | |||
rating = [] # 19 Vendor_Rating | |||
success = [] # 20 Vendor_Successful_Transactions | |||
href = [] # 23 Product_Links (Urls) | |||
listing = soup.findAll('div', {"class": "card"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
bae = a.findAll('a', href=True) | |||
# Adding the url to the list of urls | |||
link = bae[0].get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product | |||
product = bae[1].find('p').text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div') | |||
if len(bae) >= 5: | |||
# Finding Prices | |||
price = bae[0].text | |||
ud = price.replace(" USD", " ") | |||
# u = ud.replace("$","") | |||
u = ud.replace(",", "") | |||
u = u.strip() | |||
USD.append(u) | |||
# bc = (prc[1]).strip(' BTC') | |||
# BTC.append(bc) | |||
# Finding the Vendor | |||
vendor_name = bae[1].find('a').text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
# Finding the Category | |||
cat = bae[2].find('small').text | |||
cat = cat.replace("Category: ", "") | |||
cat = cat.replace(",", "") | |||
cat = cat.strip() | |||
category.append(cat) | |||
# Finding Number Sold and Quantity Left | |||
num = bae[3].text | |||
num = num.replace("Sold: ", "") | |||
num = num.strip() | |||
sold.append(num) | |||
quant = bae[4].find('small').text | |||
quant = quant.replace("In stock: ", "") | |||
quant = quant.strip() | |||
qLeft.append(quant) | |||
# Finding Successful Transactions | |||
freq = bae[1].text | |||
freq = freq.replace(vendor_name, "") | |||
freq = re.sub(r'Vendor Level \d+', "", freq) | |||
freq = freq.replace("(", "") | |||
freq = freq.replace(")", "") | |||
freq = freq.strip() | |||
success.append(freq) | |||
        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += idx
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)
        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += im
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen, | |||
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href) | |||
#called by the crawler to get description links on a listing page | |||
#@param: beautifulsoup object that is using the correct html page (listing page) | |||
#return: list of description links from a listing page | |||
def lionmarketplace_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.findAll('div', {"class": "container d-flex justify-content-center"}) | |||
for a in listing: | |||
bae = a.find('a', href=True) | |||
link = bae['href'] | |||
href.append(link) | |||
return href |
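# A minimal offline sketch of how this links parser can be exercised against a saved
# listing page. The file path below is hypothetical; in the real pipeline the parser
# is invoked through MarketPlaces.Initialization.prepare_parser instead.
if __name__ == '__main__':
    from bs4 import BeautifulSoup
    with open(r'..\LionMarketplace\HTML_Pages\01012023\Listing\example.html', 'r', encoding='utf-8') as f:
        testSoup = BeautifulSoup(f.read(), 'html.parser')
    for testLink in lionmarketplace_links_parser(testSoup):
        print(testLink)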
@ -0,0 +1,297 @@ | |||
__author__ = 'Helium' | |||
''' | |||
Mikes Grand Store Crawler (Selenium) | |||
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.MikesGrandStore.parser import mikesgrandstore_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
config = configparser.ConfigParser() | |||
config.read('../../setup.ini') | |||
counter = 1 | |||
baseURL = 'http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/' | |||
# Opens Tor Browser, crawls the website, then parses, then closes tor
#acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling(): | |||
opentor() | |||
# mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
# new_parse(forumName, baseURL, False) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path')) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
name = 'MikesGrandStore' | |||
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver(): | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 2) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
return driver | |||
#the driver 'gets' the url, attempting to access the site; if it can't, 'down' is returned
#return: the selenium driver or the string 'down'
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# Waits for a specific element to be visible so that the whole page loads before crawling begins.
# On markets that gate entry behind a captcha, this is also where manual solving would happen.
#@param: current selenium web driver
def login(driver): | |||
# wait for page to show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, "/html/body/div[1]/header/div/div[3]/div/div/ul/li[6]/a"))) | |||
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(page, url): | |||
cleanPage = cleanHTML(page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = r'..\MikesGrandStore\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html' | |||
else: | |||
fullPath = r'..\MikesGrandStore\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html' | |||
return fullPath | |||
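# illustrative result (names are placeholders): a description url saved on CURRENT_DATE
# ends up at ..\MikesGrandStore\HTML_Pages\<CURRENT_DATE>\Description\<alphanumeric-url>.html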
# Creates the file name from the passed URL; falls back to a counter-based name if nothing alphanumeric remains
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
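# illustrative example with a hypothetical url: 'http://example.onion/product/abc'
# becomes 'httpexampleonionproductabc'; a url with no alphanumeric characters at all
# falls back to the global counter instead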
# returns the list of urls of interest; the crawler runs through this list
#in this example the links are product categories, such as
# Hacking and DDOS (a Databases category is commented out below)
def getInterestedLinks(): | |||
links = [] | |||
# Hacking and DDOS | |||
links.append('http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/product-category/hacking/') | |||
# # databases | |||
# links.append('http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/product-category/databases/') | |||
return links | |||
# iterates through the links of interest; each link is visited and crawled
#listing and description pages are both crawled here, and both types of pages are saved
#@param: selenium driver
def crawlForum(driver): | |||
print("Crawling the MikesGrandStore market") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
count = 0 | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
has_next_page = True | |||
while has_next_page: | |||
                productLinks = productPages(html)
                for item in productLinks:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver.page_source, item) | |||
driver.back() | |||
                    # testing: stop after the first product; comment out for a full crawl
                    break
                # testing: stop after one listing page; comment out for a full crawl
                if count == 1:
                    count = 0
                    break
try: | |||
link = driver.find_element(by=By.XPATH, value= | |||
'/html/body/div[1]/main/div/div[1]/div/div[3]/nav/ul/li[6]/a').get_attribute('href') | |||
if link == "": | |||
raise NoSuchElementException | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("Crawling MikesGrandStore forum done sucessfully. Press ENTER to continue\n") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url): | |||
if 'product/' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url): | |||
if 'product-category' in url: | |||
return True | |||
return False | |||
# calls the links parser on the given page source to collect the description links
#@param: html source of a page reached from the interested link list, ie. getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return mikesgrandstore_links_parser(soup) | |||
# Drop links that "signout" | |||
# def isSignOut(url): | |||
# #absURL = urlparse.urljoin(url.base_url, url.url) | |||
# if 'signout' in url.lower() or 'logout' in url.lower(): | |||
# return True | |||
# | |||
# return False | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BestCardingWorld .... DONE!") |
@ -0,0 +1,291 @@ | |||
__author__ = 'DarkWeb' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup
# the regex searches below rely on re; import it explicitly rather than relying on the wildcard import
import re
#parses description pages: takes the html of a description page as a soup object and extracts the info it needs
#the extracted values are collected into one 'row' and returned after being organized
#@param: soup object looking at html page of description page
#return: 'row', a tuple holding every field scraped from the description page
def darkfox_description_parser(soup): | |||
# Fields to be parsed | |||
name = "-1" # 0 Product_Name | |||
describe = "-1" # 1 Product_Description | |||
lastSeen = "-1" # 2 Product_LastViewDate | |||
rules = "-1" # 3 NOT USED ... | |||
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 5 Product_MS_Classification (Microsoft Security) | |||
review = "-1" # 6 Product_Number_Of_Reviews | |||
category = "-1" # 7 Product_Category | |||
shipFrom = "-1" # 8 Product_ShippedFrom | |||
shipTo = "-1" # 9 Product_ShippedTo | |||
left = "-1" # 10 Product_QuantityLeft | |||
escrow = "-1" # 11 Vendor_Warranty | |||
terms = "-1" # 12 Vendor_TermsAndConditions | |||
vendor = "-1" # 13 Vendor_Name | |||
sold = "-1" # 14 Product_QuantitySold | |||
addDate = "-1" # 15 Product_AddedDate | |||
available = "-1" # 16 NOT USED ... | |||
endDate = "-1" # 17 NOT USED ... | |||
BTC = "-1" # 18 Product_BTC_SellingPrice | |||
USD = "-1" # 19 Product_USD_SellingPrice | |||
rating = "-1" # 20 Vendor_Rating | |||
success = "-1" # 21 Vendor_Successful_Transactions | |||
EURO = "-1" # 22 Product_EURO_SellingPrice | |||
# Finding Product Name | |||
name = soup.find('h1').text | |||
name = name.replace('\n', ' ') | |||
name = name.replace(",", "") | |||
name = name.strip() | |||
# Finding Vendor | |||
vendor = soup.find('h3').find('a').text.strip() | |||
# Finding Vendor Rating | |||
rating = soup.find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Successful Transactions | |||
success = soup.find('h3').text | |||
success = success.replace("Vendor: ", "") | |||
success = success.replace(vendor, "") | |||
success = success.replace("(", "") | |||
success = success.replace(")", "") | |||
success = success.strip() | |||
bae = soup.find('div', {'class': "box"}).find_all('ul') | |||
# Finding Prices | |||
USD = bae[1].find('strong').text.strip() | |||
li = bae[2].find_all('li') | |||
# Finding Escrow | |||
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Category | |||
category = li[1].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Quantity Available | |||
left = li[3].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Number Sold | |||
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() | |||
li = bae[3].find_all('li') | |||
# Finding Shipment Information (Origin) | |||
if "Ships from:" in li[-2].text: | |||
shipFrom = li[-2].text | |||
shipFrom = shipFrom.replace("Ships from: ", "") | |||
# shipFrom = shipFrom.replace(",", "") | |||
shipFrom = shipFrom.strip() | |||
# Finding Shipment Information (Destination) | |||
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text | |||
shipTo = shipTo.replace("Ships to: ", "") | |||
shipTo = shipTo.strip() | |||
if "certain countries" in shipTo: | |||
countries = "" | |||
tags = li[-1].find_all('span', {'class': "tag"}) | |||
for tag in tags: | |||
country = tag.text.strip() | |||
countries += country + ", " | |||
shipTo = countries.strip(", ") | |||
# Finding the Product description | |||
describe = soup.find('div', {'class': "pre-line"}).text | |||
describe = describe.replace("\n", " ") | |||
describe = describe.strip() | |||
'''# Finding the Number of Product Reviews | |||
tag = soup.findAll(text=re.compile('Reviews')) | |||
for index in tag: | |||
reviews = index | |||
par = reviews.find('(') | |||
if par >=0: | |||
reviews = reviews.replace("Reviews (","") | |||
reviews = reviews.replace(")","") | |||
reviews = reviews.split(",") | |||
review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) | |||
else : | |||
review = "-1"''' | |||
    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += idx
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += im
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')
    # Populating the final variable (a tuple containing every field scraped)
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, | |||
sold, addDate, available, endDate, BTC, USD, rating, success, EURO) | |||
# Sending the results | |||
return row | |||
#parses listing pages: takes the html of a listing page as a soup object and extracts the info it needs
#the info is stored in per-field lists, which are organized and returned via organizeProducts
#@param: soup object looking at html page of listing page
#return: 'row' that contains a variety of lists that each hold info on the listing page
def darkfox_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # Total_Products (Should be Integer) | |||
mktName = "DarkFox" # 0 Marketplace_Name | |||
name = [] # 1 Product_Name | |||
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = [] # 3 Product_MS_Classification (Microsoft Security) | |||
category = [] # 4 Product_Category | |||
describe = [] # 5 Product_Description | |||
escrow = [] # 6 Vendor_Warranty | |||
views = [] # 7 Product_Number_Of_Views | |||
reviews = [] # 8 Product_Number_Of_Reviews | |||
addDate = [] # 9 Product_AddDate | |||
lastSeen = [] # 10 Product_LastViewDate | |||
BTC = [] # 11 Product_BTC_SellingPrice | |||
USD = [] # 12 Product_USD_SellingPrice | |||
EURO = [] # 13 Product_EURO_SellingPrice | |||
sold = [] # 14 Product_QuantitySold | |||
    qLeft = []  # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom | |||
shipTo = [] # 17 Product_ShippedTo | |||
vendor = [] # 18 Vendor | |||
rating = [] # 19 Vendor_Rating | |||
success = [] # 20 Vendor_Successful_Transactions | |||
href = [] # 23 Product_Links (Urls) | |||
listing = soup.findAll('div', {"class": "card"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
bae = a.findAll('a', href=True) | |||
# Adding the url to the list of urls | |||
link = bae[0].get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product | |||
product = bae[1].find('p').text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div') | |||
if len(bae) >= 5: | |||
# Finding Prices | |||
price = bae[0].text | |||
ud = price.replace(" USD", " ") | |||
# u = ud.replace("$","") | |||
u = ud.replace(",", "") | |||
u = u.strip() | |||
USD.append(u) | |||
# bc = (prc[1]).strip(' BTC') | |||
# BTC.append(bc) | |||
# Finding the Vendor | |||
vendor_name = bae[1].find('a').text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
# Finding the Category | |||
cat = bae[2].find('small').text | |||
cat = cat.replace("Category: ", "") | |||
cat = cat.replace(",", "") | |||
cat = cat.strip() | |||
category.append(cat) | |||
# Finding Number Sold and Quantity Left | |||
num = bae[3].text | |||
num = num.replace("Sold: ", "") | |||
num = num.strip() | |||
sold.append(num) | |||
quant = bae[4].find('small').text | |||
quant = quant.replace("In stock: ", "") | |||
quant = quant.strip() | |||
qLeft.append(quant) | |||
# Finding Successful Transactions | |||
freq = bae[1].text | |||
freq = freq.replace(vendor_name, "") | |||
freq = re.sub(r'Vendor Level \d+', "", freq) | |||
freq = freq.replace("(", "") | |||
freq = freq.replace(")", "") | |||
freq = freq.strip() | |||
success.append(freq) | |||
        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += idx
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)
        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += im
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen, | |||
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href) | |||
#called by the crawler to get description links on a listing page | |||
#@param: beautifulsoup object that is using the correct html page (listing page) | |||
#return: list of description links from a listing page | |||
def mikesgrandstore_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.findAll('div', {"class": "box-image"}) | |||
for a in listing: | |||
bae = a.find('div', {"class": "image-fade_in_back"}).find('a', href=True) | |||
link = bae['href'] | |||
href.append(link) | |||
return href |
@ -0,0 +1,290 @@ | |||
__author__ = 'Helium' | |||
''' | |||
TorBay Market Crawler (Selenium)
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.TorBay.parser import torbay_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
config = configparser.ConfigParser() | |||
config.read('../../setup.ini') | |||
counter = 1 | |||
baseURL = 'http://torbay3253zck4ym5cbowwvrbfjjzruzthrx3np5y6owvifrnhy5ybid.onion/' | |||
# Opens Tor Browser, crawls the website, then parses, then closes tor
#acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling(): | |||
opentor() | |||
mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
# new_parse(forumName, baseURL, False) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path')) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
name = 'TorBay Market' | |||
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://torbay3253zck4ym5cbowwvrbfjjzruzthrx3np5y6owvifrnhy5ybid.onion/' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver(): | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 3) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", True) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
return driver | |||
#the driver 'gets' the url, attempting to access the site; if it can't, 'down' is returned
#return: the selenium driver or the string 'down'
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# Waits for a specific element to be visible so that the whole page loads before crawling begins.
# On markets that gate entry behind a captcha, this is also where manual solving would happen.
#@param: current selenium web driver
def login(driver): | |||
# wait for page to show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, "/html/body/div[2]/div/div/div/ul/li[6]/a"))) | |||
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(page, url): | |||
cleanPage = cleanHTML(page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url):
    fileName = getNameFromURL(url)
    # date stamp in MMDDYYYY form, matching the folder naming used by the other crawlers
    dateStr = "%02d%02d%04d" % (date.today().month, date.today().day, date.today().year)
    if isDescriptionLink(url):
        fullPath = r'..\TorBay\HTML_Pages\\' + dateStr + r'\\Description\\' + fileName + '.html'
    else:
        fullPath = r'..\TorBay\HTML_Pages\\' + dateStr + r'\\Listing\\' + fileName + '.html'
    return fullPath
# Creates the file name from the passed URL; falls back to a counter-based name if nothing alphanumeric remains
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
# returns the list of urls of interest; the crawler runs through this list
#in this example the links are product categories, here just
# Hacking
def getInterestedLinks(): | |||
links = [] | |||
# Hacking | |||
links.append('http://torbay3253zck4ym5cbowwvrbfjjzruzthrx3np5y6owvifrnhy5ybid.onion/category/hacking') | |||
return links | |||
# iterates through the links of interest; each link is visited and crawled
#listing and description pages are both crawled here, and both types of pages are saved
#@param: selenium driver
def crawlForum(driver): | |||
print("Crawling the TorBay Market") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
count = 0 | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
has_next_page = True | |||
while has_next_page: | |||
                productLinks = productPages(html)
                for item in productLinks:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver.page_source, item) | |||
driver.back() | |||
                    # testing: stop after the first product; comment out for a full crawl
                    break
                # testing: stop after one listing page; comment out for a full crawl
                if count == 1:
                    count = 0
                    break
try: | |||
link = driver.find_element(by=By.XPATH, value= | |||
'/html/body/section/div/div/div[2]/div/div[2]/ul/li[3]/a').get_attribute('href') | |||
if link == "": | |||
raise NoSuchElementException | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("Crawling TorBay forum done sucessfully. Press ENTER to continue\n") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url): | |||
if 'product' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url): | |||
if 'category' in url: | |||
return True | |||
return False | |||
# calls the links parser on the given page source to collect the description links
#@param: html source of a page reached from the interested link list, ie. getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return torbay_links_parser(soup) | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BestCardingWorld .... DONE!") |
@ -0,0 +1,291 @@ | |||
__author__ = 'DarkWeb' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup
# the regex searches below rely on re; import it explicitly rather than relying on the wildcard import
import re
#parses description pages: takes the html of a description page as a soup object and extracts the info it needs
#the extracted values are collected into one 'row' and returned after being organized
#@param: soup object looking at html page of description page
#return: 'row', a tuple holding every field scraped from the description page
def darkfox_description_parser(soup): | |||
# Fields to be parsed | |||
name = "-1" # 0 Product_Name | |||
describe = "-1" # 1 Product_Description | |||
lastSeen = "-1" # 2 Product_LastViewDate | |||
rules = "-1" # 3 NOT USED ... | |||
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 5 Product_MS_Classification (Microsoft Security) | |||
review = "-1" # 6 Product_Number_Of_Reviews | |||
category = "-1" # 7 Product_Category | |||
shipFrom = "-1" # 8 Product_ShippedFrom | |||
shipTo = "-1" # 9 Product_ShippedTo | |||
left = "-1" # 10 Product_QuantityLeft | |||
escrow = "-1" # 11 Vendor_Warranty | |||
terms = "-1" # 12 Vendor_TermsAndConditions | |||
vendor = "-1" # 13 Vendor_Name | |||
sold = "-1" # 14 Product_QuantitySold | |||
addDate = "-1" # 15 Product_AddedDate | |||
available = "-1" # 16 NOT USED ... | |||
endDate = "-1" # 17 NOT USED ... | |||
BTC = "-1" # 18 Product_BTC_SellingPrice | |||
USD = "-1" # 19 Product_USD_SellingPrice | |||
rating = "-1" # 20 Vendor_Rating | |||
success = "-1" # 21 Vendor_Successful_Transactions | |||
EURO = "-1" # 22 Product_EURO_SellingPrice | |||
# Finding Product Name | |||
name = soup.find('h1').text | |||
name = name.replace('\n', ' ') | |||
name = name.replace(",", "") | |||
name = name.strip() | |||
# Finding Vendor | |||
vendor = soup.find('h3').find('a').text.strip() | |||
# Finding Vendor Rating | |||
rating = soup.find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Successful Transactions | |||
success = soup.find('h3').text | |||
success = success.replace("Vendor: ", "") | |||
success = success.replace(vendor, "") | |||
success = success.replace("(", "") | |||
success = success.replace(")", "") | |||
success = success.strip() | |||
bae = soup.find('div', {'class': "box"}).find_all('ul') | |||
# Finding Prices | |||
USD = bae[1].find('strong').text.strip() | |||
li = bae[2].find_all('li') | |||
# Finding Escrow | |||
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Category | |||
category = li[1].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Quantity Available | |||
left = li[3].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Number Sold | |||
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() | |||
li = bae[3].find_all('li') | |||
# Finding Shipment Information (Origin) | |||
if "Ships from:" in li[-2].text: | |||
shipFrom = li[-2].text | |||
shipFrom = shipFrom.replace("Ships from: ", "") | |||
# shipFrom = shipFrom.replace(",", "") | |||
shipFrom = shipFrom.strip() | |||
# Finding Shipment Information (Destination) | |||
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text | |||
shipTo = shipTo.replace("Ships to: ", "") | |||
shipTo = shipTo.strip() | |||
if "certain countries" in shipTo: | |||
countries = "" | |||
tags = li[-1].find_all('span', {'class': "tag"}) | |||
for tag in tags: | |||
country = tag.text.strip() | |||
countries += country + ", " | |||
shipTo = countries.strip(", ") | |||
# Finding the Product description | |||
describe = soup.find('div', {'class': "pre-line"}).text | |||
describe = describe.replace("\n", " ") | |||
describe = describe.strip() | |||
'''# Finding the Number of Product Reviews | |||
tag = soup.findAll(text=re.compile('Reviews')) | |||
for index in tag: | |||
reviews = index | |||
par = reviews.find('(') | |||
if par >=0: | |||
reviews = reviews.replace("Reviews (","") | |||
reviews = reviews.replace(")","") | |||
reviews = reviews.split(",") | |||
review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) | |||
else : | |||
review = "-1"''' | |||
    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += idx
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += im
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')
    # Populating the final variable (a tuple containing every field scraped)
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, | |||
sold, addDate, available, endDate, BTC, USD, rating, success, EURO) | |||
# Sending the results | |||
return row | |||
#parses listing pages: takes the html of a listing page as a soup object and extracts the info it needs
#the info is stored in per-field lists, which are organized and returned via organizeProducts
#@param: soup object looking at html page of listing page
#return: 'row' that contains a variety of lists that each hold info on the listing page
def darkfox_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # Total_Products (Should be Integer) | |||
mktName = "DarkFox" # 0 Marketplace_Name | |||
name = [] # 1 Product_Name | |||
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = [] # 3 Product_MS_Classification (Microsoft Security) | |||
category = [] # 4 Product_Category | |||
describe = [] # 5 Product_Description | |||
escrow = [] # 6 Vendor_Warranty | |||
views = [] # 7 Product_Number_Of_Views | |||
reviews = [] # 8 Product_Number_Of_Reviews | |||
addDate = [] # 9 Product_AddDate | |||
lastSeen = [] # 10 Product_LastViewDate | |||
BTC = [] # 11 Product_BTC_SellingPrice | |||
USD = [] # 12 Product_USD_SellingPrice | |||
EURO = [] # 13 Product_EURO_SellingPrice | |||
sold = [] # 14 Product_QuantitySold | |||
    qLeft = []  # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom | |||
shipTo = [] # 17 Product_ShippedTo | |||
vendor = [] # 18 Vendor | |||
rating = [] # 19 Vendor_Rating | |||
success = [] # 20 Vendor_Successful_Transactions | |||
href = [] # 23 Product_Links (Urls) | |||
listing = soup.findAll('div', {"class": "card"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
bae = a.findAll('a', href=True) | |||
# Adding the url to the list of urls | |||
link = bae[0].get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product | |||
product = bae[1].find('p').text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div') | |||
if len(bae) >= 5: | |||
# Finding Prices | |||
price = bae[0].text | |||
ud = price.replace(" USD", " ") | |||
# u = ud.replace("$","") | |||
u = ud.replace(",", "") | |||
u = u.strip() | |||
USD.append(u) | |||
# bc = (prc[1]).strip(' BTC') | |||
# BTC.append(bc) | |||
# Finding the Vendor | |||
vendor_name = bae[1].find('a').text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
# Finding the Category | |||
cat = bae[2].find('small').text | |||
cat = cat.replace("Category: ", "") | |||
cat = cat.replace(",", "") | |||
cat = cat.strip() | |||
category.append(cat) | |||
# Finding Number Sold and Quantity Left | |||
num = bae[3].text | |||
num = num.replace("Sold: ", "") | |||
num = num.strip() | |||
sold.append(num) | |||
quant = bae[4].find('small').text | |||
quant = quant.replace("In stock: ", "") | |||
quant = quant.strip() | |||
qLeft.append(quant) | |||
# Finding Successful Transactions | |||
freq = bae[1].text | |||
freq = freq.replace(vendor_name, "") | |||
freq = re.sub(r'Vendor Level \d+', "", freq) | |||
freq = freq.replace("(", "") | |||
freq = freq.replace(")", "") | |||
freq = freq.strip() | |||
success.append(freq) | |||
        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += idx
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)
        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += im
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen, | |||
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href) | |||
#called by the crawler to get description links on a listing page | |||
#@param: beautifulsoup object that is using the correct html page (listing page) | |||
#return: list of description links from a listing page | |||
def torbay_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.find('section', {"id": "content"}).findAll('div', {"class": "product-card"}) | |||
for a in listing: | |||
bae = a.find('div', {"class": "pc-footer"}).find('a', {"class": "btn btn-primary"}, href=True) | |||
link = bae['href'] | |||
href.append(link) | |||
return href |
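# A more defensive variant of the same traversal (an illustrative sketch, not wired
# into the pipeline): it skips product cards whose footer or button is missing
# instead of raising an AttributeError on a None result.
def torbay_links_parser_safe(soup):
    href = []
    content = soup.find('section', {"id": "content"})
    if content is None:
        return href
    for card in content.findAll('div', {"class": "product-card"}):
        footer = card.find('div', {"class": "pc-footer"})
        anchor = footer.find('a', {"class": "btn btn-primary"}, href=True) if footer else None
        if anchor is not None:
            href.append(anchor['href'])
    return href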
@ -0,0 +1,299 @@ | |||
__author__ = 'Helium' | |||
''' | |||
TorMarket Crawler (Selenium)
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, re, time | |||
from datetime import date | |||
import subprocess | |||
import configparser | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.TorMarket.parser import tormarket_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
config = configparser.ConfigParser() | |||
config.read('../../setup.ini') | |||
counter = 1 | |||
baseURL = 'http://22222253ebafysmwyrl4uxfcs2xm6k7zb4xyse2csne73atvxu53gfad.onion/' | |||
# Opens Tor Browser, crawls the website, then parses, then closes tor
#acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling(): | |||
opentor() | |||
# mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
# new_parse(forumName, baseURL, False) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path')) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
name = 'TorMarket' | |||
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://22222253ebafysmwyrl4uxfcs2xm6k7zb4xyse2csne73atvxu53gfad.onion/' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver(): | |||
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path')) | |||
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path')) | |||
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 2) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(config.get('TOR', 'geckodriver_path')) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
return driver | |||
#the driver 'gets' the url, attempting to access the site; if it can't, 'down' is returned
#return: the selenium driver or the string 'down'
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# Waits for a specific element to be visible so that the whole page loads before crawling begins.
# On markets that gate entry behind a captcha, this is also where manual solving would happen.
#@param: current selenium web driver
def login(driver): | |||
# wait for page to show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, "/html/body/div[2]/div/div/div/main/article/div/section[4]/div/div[1]/div/div/div/div/ul/li[15]/ul/li[3]/a"))) | |||
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(page, url): | |||
cleanPage = cleanHTML(page) | |||
filePath = getFullPathName(url) | |||
os.makedirs(os.path.dirname(filePath), exist_ok=True) | |||
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
return | |||
# Gets the full path of the page to be saved along with its appropriate file name | |||
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url): | |||
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = r'..\TorMarket\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html' | |||
else: | |||
fullPath = r'..\TorMarket\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html' | |||
return fullPath | |||
# Creates the file name from the passed URL; falls back to a counter-based name if nothing alphanumeric remains
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
# returns the list of urls of interest; the crawler runs through this list
#in this example the links are product categories, such as
# Hacking Tutorials (with Malware and Hacking Services commented out below)
def getInterestedLinks(): | |||
links = [] | |||
# Hacking Tutorials | |||
links.append('http://22222253ebafysmwyrl4uxfcs2xm6k7zb4xyse2csne73atvxu53gfad.onion/product-category/guides-tutorials/hacking/') | |||
# # Malware | |||
# links.append('http://22222253ebafysmwyrl4uxfcs2xm6k7zb4xyse2csne73atvxu53gfad.onion/product-category/malware/') | |||
# # Hacking Services | |||
# links.append('http://22222253ebafysmwyrl4uxfcs2xm6k7zb4xyse2csne73atvxu53gfad.onion/product-category/services/hacking-services/') | |||
return links | |||
# iterates through the links of interest; each link is visited and crawled
#listing and description pages are both crawled here, and both types of pages are saved
#@param: selenium driver
def crawlForum(driver): | |||
print("Crawling the TorMarket market") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
count = 0 | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
has_next_page = True | |||
while has_next_page: | |||
                productLinks = productPages(html)
                for item in productLinks:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver.page_source, item) | |||
driver.back() | |||
                    # testing: stop after the first product; comment out for a full crawl
                    break
                # testing: stop after one listing page; comment out for a full crawl
                if count == 1:
                    count = 0
                    break
try: | |||
link = driver.find_element(by=By.XPATH, value= | |||
'/html/body/div[2]/div/div/div[1]/main/nav/ul/li[5]/a').get_attribute('href') | |||
if link == "": | |||
raise NoSuchElementException | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("Crawling TorMarket forum done sucessfully. Press ENTER to continue\n") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url): | |||
if 'shop' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url): | |||
if 'product-category' in url: | |||
return True | |||
return False | |||
# calls the links parser on the given page source to collect the description links
#@param: html source of a page reached from the interested link list, ie. getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return tormarket_links_parser(soup) | |||
# Drop links that "signout" | |||
# def isSignOut(url): | |||
# #absURL = urlparse.urljoin(url.base_url, url.url) | |||
# if 'signout' in url.lower() or 'logout' in url.lower(): | |||
# return True | |||
# | |||
# return False | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BestCardingWorld .... DONE!") |
@ -0,0 +1,291 @@ | |||
__author__ = 'DarkWeb' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup
# the regex searches below rely on re; import it explicitly rather than relying on the wildcard import
import re
#parses description pages: takes the html of a description page as a soup object and extracts the info it needs
#the extracted values are collected into one 'row' and returned after being organized
#@param: soup object looking at html page of description page
#return: 'row', a tuple holding every field scraped from the description page
def darkfox_description_parser(soup): | |||
# Fields to be parsed | |||
name = "-1" # 0 Product_Name | |||
describe = "-1" # 1 Product_Description | |||
lastSeen = "-1" # 2 Product_LastViewDate | |||
rules = "-1" # 3 NOT USED ... | |||
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 5 Product_MS_Classification (Microsoft Security) | |||
review = "-1" # 6 Product_Number_Of_Reviews | |||
category = "-1" # 7 Product_Category | |||
shipFrom = "-1" # 8 Product_ShippedFrom | |||
shipTo = "-1" # 9 Product_ShippedTo | |||
left = "-1" # 10 Product_QuantityLeft | |||
escrow = "-1" # 11 Vendor_Warranty | |||
terms = "-1" # 12 Vendor_TermsAndConditions | |||
vendor = "-1" # 13 Vendor_Name | |||
sold = "-1" # 14 Product_QuantitySold | |||
addDate = "-1" # 15 Product_AddedDate | |||
available = "-1" # 16 NOT USED ... | |||
endDate = "-1" # 17 NOT USED ... | |||
BTC = "-1" # 18 Product_BTC_SellingPrice | |||
USD = "-1" # 19 Product_USD_SellingPrice | |||
rating = "-1" # 20 Vendor_Rating | |||
success = "-1" # 21 Vendor_Successful_Transactions | |||
EURO = "-1" # 22 Product_EURO_SellingPrice | |||
# Finding Product Name | |||
name = soup.find('h1').text | |||
name = name.replace('\n', ' ') | |||
name = name.replace(",", "") | |||
name = name.strip() | |||
# Finding Vendor | |||
vendor = soup.find('h3').find('a').text.strip() | |||
# Finding Vendor Rating | |||
rating = soup.find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Successful Transactions | |||
success = soup.find('h3').text | |||
success = success.replace("Vendor: ", "") | |||
success = success.replace(vendor, "") | |||
success = success.replace("(", "") | |||
success = success.replace(")", "") | |||
success = success.strip() | |||
bae = soup.find('div', {'class': "box"}).find_all('ul') | |||
# Finding Prices | |||
USD = bae[1].find('strong').text.strip() | |||
li = bae[2].find_all('li') | |||
# Finding Escrow | |||
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Category | |||
category = li[1].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Quantity Available | |||
left = li[3].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Number Sold | |||
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() | |||
li = bae[3].find_all('li') | |||
# Finding Shipment Information (Origin) | |||
if "Ships from:" in li[-2].text: | |||
shipFrom = li[-2].text | |||
shipFrom = shipFrom.replace("Ships from: ", "") | |||
# shipFrom = shipFrom.replace(",", "") | |||
shipFrom = shipFrom.strip() | |||
# Finding Shipment Information (Destination) | |||
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text | |||
shipTo = shipTo.replace("Ships to: ", "") | |||
shipTo = shipTo.strip() | |||
if "certain countries" in shipTo: | |||
countries = "" | |||
tags = li[-1].find_all('span', {'class': "tag"}) | |||
for tag in tags: | |||
country = tag.text.strip() | |||
countries += country + ", " | |||
shipTo = countries.strip(", ") | |||
# Finding the Product description | |||
describe = soup.find('div', {'class': "pre-line"}).text | |||
describe = describe.replace("\n", " ") | |||
describe = describe.strip() | |||
'''# Finding the Number of Product Reviews | |||
tag = soup.findAll(text=re.compile('Reviews')) | |||
for index in tag: | |||
reviews = index | |||
par = reviews.find('(') | |||
if par >=0: | |||
reviews = reviews.replace("Reviews (","") | |||
reviews = reviews.replace(")","") | |||
reviews = reviews.split(",") | |||
review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) | |||
else : | |||
review = "-1"''' | |||
# Searching for CVE and MS categories | |||
cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if cve: | |||
CVE = " " | |||
for idx in cve: | |||
CVE += (idx) | |||
CVE += " " | |||
CVE = CVE.replace(',', ' ') | |||
CVE = CVE.replace('\n', '') | |||
ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if ms: | |||
MS = " " | |||
for im in ms: | |||
MS += (im) | |||
MS += " " | |||
MS = MS.replace(',', ' ') | |||
MS = MS.replace('\n', '') | |||
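# For reference, what the two patterns above match (sample strings are
# invented for illustration):
#   re.findall(r'CVE-\d{4}-\d{4}', 'uses CVE-2021-4034')  -> ['CVE-2021-4034']
#   re.findall(r'MS\d{2}-\d{3}', 'patched in MS17-010')   -> ['MS17-010']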
# Populating the final variable (this should be a list with all fields scraped) | |||
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, | |||
sold, addDate, available, endDate, BTC, USD, rating, success, EURO) | |||
# Sending the results | |||
return row | |||
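# Example of consuming the returned tuple, using the field order documented
# above; 'soup' would come from a saved description page (an assumption):
#
# row = darkfox_description_parser(soup)
# name, describe = row[0], row[1]
# USD, rating = row[19], row[20]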
#parses listing pages: takes the html of a listing page as a soup object and extracts the info it needs
#stores that info in parallel lists, one entry per product, which are organized and returned together
#@param: soup object of the html page of a listing page
#return: 'row' that contains a variety of lists, each holding info on the listing page
def darkfox_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # Total_Products (Should be Integer) | |||
mktName = "DarkFox" # 0 Marketplace_Name | |||
name = [] # 1 Product_Name | |||
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = [] # 3 Product_MS_Classification (Microsoft Security) | |||
category = [] # 4 Product_Category | |||
describe = [] # 5 Product_Description | |||
escrow = [] # 6 Vendor_Warranty | |||
views = [] # 7 Product_Number_Of_Views | |||
reviews = [] # 8 Product_Number_Of_Reviews | |||
addDate = [] # 9 Product_AddDate | |||
lastSeen = [] # 10 Product_LastViewDate | |||
BTC = [] # 11 Product_BTC_SellingPrice | |||
USD = [] # 12 Product_USD_SellingPrice | |||
EURO = [] # 13 Product_EURO_SellingPrice | |||
sold = [] # 14 Product_QuantitySold | |||
qLeft = [] # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom | |||
shipTo = [] # 17 Product_ShippedTo | |||
vendor = [] # 18 Vendor | |||
rating = [] # 19 Vendor_Rating | |||
success = [] # 20 Vendor_Successful_Transactions | |||
href = [] # 23 Product_Links (Urls) | |||
listing = soup.findAll('div', {"class": "card"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
for a in listing: | |||
bae = a.findAll('a', href=True) | |||
# Adding the url to the list of urls | |||
link = bae[0].get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product | |||
product = bae[1].find('p').text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div') | |||
if len(bae) >= 5: | |||
# Finding Prices | |||
price = bae[0].text | |||
ud = price.replace(" USD", " ") | |||
# u = ud.replace("$","") | |||
u = ud.replace(",", "") | |||
u = u.strip() | |||
USD.append(u) | |||
# bc = (prc[1]).strip(' BTC') | |||
# BTC.append(bc) | |||
# Finding the Vendor | |||
vendor_name = bae[1].find('a').text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
# Finding the Category | |||
cat = bae[2].find('small').text | |||
cat = cat.replace("Category: ", "") | |||
cat = cat.replace(",", "") | |||
cat = cat.strip() | |||
category.append(cat) | |||
# Finding Number Sold and Quantity Left | |||
num = bae[3].text | |||
num = num.replace("Sold: ", "") | |||
num = num.strip() | |||
sold.append(num) | |||
quant = bae[4].find('small').text | |||
quant = quant.replace("In stock: ", "") | |||
quant = quant.strip() | |||
qLeft.append(quant) | |||
# Finding Successful Transactions | |||
freq = bae[1].text | |||
freq = freq.replace(vendor_name, "") | |||
freq = re.sub(r'Vendor Level \d+', "", freq) | |||
freq = freq.replace("(", "") | |||
freq = freq.replace(")", "") | |||
freq = freq.strip() | |||
success.append(freq) | |||
# Searching for CVE and MS categories | |||
cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if not cve: | |||
cveValue="-1" | |||
else: | |||
cee = " " | |||
for idx in cve: | |||
cee += (idx) | |||
cee += " " | |||
cee = cee.replace(',', ' ') | |||
cee = cee.replace('\n', '') | |||
cveValue = cee
CVE.append(cveValue) | |||
ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if not ms: | |||
MSValue="-1" | |||
else: | |||
me = " " | |||
for im in ms: | |||
me += (im) | |||
me += " " | |||
me = me.replace(',', ' ') | |||
me = me.replace('\n', '') | |||
MSValue = me
MS.append(MSValue) | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen, | |||
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href) | |||
#called by the crawler to get the description links on a listing page
#@param: soup object of the html page of a listing page
#return: list of description links found on the listing page
def tormarket_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.findAll('div', {"class": "product-loop-content text-center"}) | |||
for a in listing: | |||
bae = a.find('h2', {"class": "woocommerce-loop-product__title"}).find('a', href=True) | |||
link = bae['href'] | |||
href.append(link) | |||
return href |
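# A minimal offline smoke test, assuming a locally saved listing page at the
# hypothetical path 'listing.html'; useful for checking the selectors above
# without touching the network.
if __name__ == '__main__':
    with open('listing.html', 'r', encoding='utf-8') as f:
        testSoup = BeautifulSoup(f.read(), "html.parser")
    print(tormarket_links_parser(testSoup))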