
Merge branch 'main' of https://gitlab.com/dw9372422/dw_pipeline_test into khoi-dev

Branch: main
Khoi, 1 year ago
commit e3d431b078
25 changed files with 2922 additions and 430 deletions
 1. .idea/DW_Pipeline_Test.iml (+3, -1)
 2. .idea/misc.xml (+1, -1)
 3. Forums/Altenens/captcha.png (BIN)
 4. Forums/Altenens/crawler_mechanize.py (+257, -0)
 5. Forums/CrackingPro/crawler_selenium.py (+45, -19)
 6. Forums/CrackingPro/parser.py (+8, -6)
 7. Forums/CryptBB/crawler_selenium.py (+30, -28)
 8. Forums/CryptBB/parser.py (+21, -36)
 9. Forums/DB_Connection/db_connection.py (+11, -19)
10. Forums/Initialization/forums_mining.py (+8, -9)
11. Forums/Initialization/geckodriver.log (+118, -0)
12. Forums/Initialization/prepare_parser.py (+18, -23)
13. Forums/Utilities/utilities.py (+1, -1)
14. MarketPlaces/DB_Connection/db_connection.py (+109, -77)
15. MarketPlaces/Initialization/geckodriver.log (+32, -0)
16. MarketPlaces/Initialization/marketsList.txt (+1, -1)
17. MarketPlaces/Initialization/markets_mining.py (+6, -3)
18. MarketPlaces/Initialization/prepare_parser.py (+59, -95)
19. MarketPlaces/ThiefWorld/crawler_selenium.py (+309, -0)
20. MarketPlaces/ThiefWorld/geckodriver.log (+1483, -0)
21. MarketPlaces/ThiefWorld/parser.py (+291, -0)
22. MarketPlaces/Tor2door/crawler_selenium.py (+25, -30)
23. MarketPlaces/Tor2door/parser.py (+44, -55)
24. MarketPlaces/Utilities/utilities.py (+28, -26)
25. setup.ini (+14, -0)

.idea/DW_Pipeline_Test.iml (+3, -1)

@ -2,7 +2,7 @@
 <module type="PYTHON_MODULE" version="4">
   <component name="NewModuleRootManager">
     <content url="file://$MODULE_DIR$" />
-    <orderEntry type="jdk" jdkName="C:\ProgramData\Anaconda3" jdkType="Python SDK" />
+    <orderEntry type="jdk" jdkName="Python 3.11 (venv)" jdkType="Python SDK" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
   <component name="PyNamespacePackagesService">
@ -12,6 +12,8 @@
       <option value="$MODULE_DIR$/Forums/CryptBB" />
       <option value="$MODULE_DIR$/MarketPlaces/DarkFox" />
       <option value="$MODULE_DIR$/MarketPlaces/Tor2door" />
+      <option value="$MODULE_DIR$/Forums/OnniForums" />
+      <option value="$MODULE_DIR$/MarketPlaces/ThiefWorld" />
     </list>
   </option>
 </component>

.idea/misc.xml (+1, -1)

@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="C:\ProgramData\Anaconda3" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (venv)" project-jdk-type="Python SDK" />
 </project>

Forums/Altenens/captcha.png (BIN)

Width: 200  |  Height: 60  |  Size: 16 KiB

Forums/Altenens/crawler_mechanize.py (+257, -0)

@ -0,0 +1,257 @@
__author__ = '91Shadows'

'''
CryptBB Crawler (Mechanize)
'''

import codecs, os, re
import socks, socket, time
from datetime import date
import urllib.parse as urlparse
import http.client as httplib
import mechanize
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.BestCardingWorld.parser import bestcardingworld_links_parser

counter = 1
httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'

baseURL = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5'
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150)


# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    getUrl()
    forumName = getForumName()
    br = getAccess()

    if br != 'down':
        crawlForum(br)
        new_parse(forumName, False)
        # new_parse(forumName, False)

    closetor()


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    path = open('../../path.txt').readline()
    pro = subprocess.Popen(path)
    pid = pro.pid
    time.sleep(7.5)
    input("Tor Connected. Press ENTER to continue\n")
    return


# Creates a connection through Tor Port
def getUrl(timeout=None):
    socket.socket = socks.socksocket
    socket.create_connection = create_connection
    return


# Makes the onion address request
def create_connection(address, timeout=None, source_address=None):
    sock = socks.socksocket()
    sock.connect(address)
    return sock


# Returns the name of website
def getForumName():
    name = 'CryptBB'
    return name


# Return the link of website
def getFixedURL():
    url = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5'
    return url


# Closes Tor Browser
def closetor():
    global pid
    os.system("taskkill /pid " + str(pid))
    print('Closing Tor...')
    time.sleep(3)
    return


# Creates a Mechanize browser and initializes its options
def createBrowser():
    br = mechanize.Browser()
    cj = mechanize.CookieJar()
    br.set_cookiejar(cj)

    # Browser options
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'),
                     ('Accept', '*/*')]
    return br


def getAccess():
    url = getFixedURL()
    br = createBrowser()

    try:
        br.open(url)
        return br
    except:
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    a = page.read()
    open(filePath, "wb").write(a)
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/BestCardingWorld/HTML_Pages/' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + '/' + 'Description/' + fileName + '.html'
    else:
        fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/BestCardingWorld/HTML_Pages/' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + '/' + 'Listing/' + fileName + '.html'
    return fullPath


# Creates the name of the file based on URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if (name == ''):
        name = str(counter)
        counter = counter + 1
    return name


# Hacking and Markets related topics
def getInterestedLinks():
    links = []
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=43&sid=e12864ffccc5df877b03b573534955be')
    return links


# Start crawling Forum pages
def crawlForum(br):
    print("Crawling CryptBB forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            page = br.open(link)
            savePage(page, link)

            res = br.response().read()
            soup = BeautifulSoup(res, 'html.parser')
            next_link = soup.find("a", {"rel": "next"})
            if next_link != None:
                full_url = urlparse.urljoin(linksToCrawl[i], next_link['href'])
                linksToCrawl.insert(i + 1, full_url)

            listOfTopics = findDescriptionPages(link)
            for topic in listOfTopics:
                itemPage = br.open(str(topic))
                savePage(itemPage, topic)

        except Exception as e:
            print('Error getting link: ', link, e)

        i += 1

    # finalTime = time.time()
    # print finalTime - initialTime

    input("CryptBB forum done sucessfully. Press ENTER to continue\n")
    return


# Returns True if the link is 'Topic' Links, may need to change for diff websites
def isDescriptionLink(url):
    if 'topic' in url:
        return True
    return False


# Returns True if the link is a listingPage link, may need to change for diff websites
def isListingLink(url):
    '''
    reg = 'board=[0-9]+.[0-9]+\Z'
    if len(re.findall(reg, url)) == 0:
        return False
    return True
    '''
    if 'forum' in url:
        return True
    return False


# calling the parser to define the links
def findDescriptionPages(url):
    soup = ""
    error = False
    try:
        html = codecs.open(
            "C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\BestCardingWorld\\HTML_Pages\\" + str(
                "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
                "%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html", encoding='utf8')
        soup = BeautifulSoup(html, "html.parser")
    except:
        try:
            html = open(
                "C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\BestCardingWorld\\HTML_Pages\\" + str(
                    "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
                    "%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html")
            soup = BeautifulSoup(html, "html.parser")
        except:
            error = True
            print("There was a problem to read the file " + getNameFromURL(url) + " in the listing section.")

    if not error:
        return bestcardingworld_links_parser(soup)
    else:
        return []


def crawler():
    startCrawling()
    print("Crawling and Parsing CryptBB .... DONE!")

Forums/CrackingPro/crawler_selenium.py (+45, -19)

@ -20,7 +20,7 @@ from datetime import date
 import subprocess
 from bs4 import BeautifulSoup
 from Forums.Initialization.prepare_parser import new_parse
-from Forums.OnniForums.parser import cryptBB_links_parser
+from Forums.CrackingPro.parser import crackingPro_links_parser
 from Forums.Utilities.utilities import cleanHTML

 counter = 1

@ -58,7 +58,7 @@ def opentor():

 # Login using premade account credentials and do login captcha manually
 def login(driver):
+    '''
     #click login button
     login_link = driver.find_element(
         by=By.ID, value='elUserSignIn').\

@ -73,7 +73,7 @@ def login(driver):

     #Password here
     passwordBox.send_keys('Gr33nSp@m&3ggs')
+    '''

     input("Press ENTER when log in is completed\n")

@ -237,27 +237,53 @@ def crawlForum(driver):

             has_next_page = True
             while has_next_page:
-                list = topicPages(html)#parses?
+                list = topicPages(html) # for multiple pages
                 for item in list:
-                    itemURL = urlparse.urljoin(baseURL, str(item))
-                    try:
-                        driver.get(itemURL)
-                    except:
-                        driver.refresh()
-                    savePage(driver.page_source, item)
-                    driver.back()
+                    # variable to check if there is a next page for the topic
+                    has_next_topic_page = True
+                    back_counter = 1
+
+                    # check if there is a next page for the topics
+                    while has_next_topic_page:
+                        # try to access next page of th topic
+                        itemURL = urlparse.urljoin(baseURL, str(item))
+                        try:
+                            driver.get(itemURL)
+                        except:
+                            driver.refresh()
+                        savePage(driver.page_source, item)
+
+                        # if there is a next page then go and save....
+                        # next page in the topic?
+                        try:
+                            temp = driver.find_element(by=By.ID, value='comments') #
+                            temp2 = temp.find_elements(by=By.XPATH, value='/html/body/main/div/div/div/div[4]/div[1]')
+                            temp3 = temp2.find_elements(by=By.CLASS_NAME, value='ipsPagination')#/html/body/main/div/div/div/div[4]/div[1]
+                            item = temp3.find_element(by=By.CLASS_NAME, value='ipsPagination_next').get_attribute('href') # /html/body/div/div[2]/div/div[2]/div
+
+                            if item == "":
+                                raise NoSuchElementException
+                                has_next_topic_page = False
+                            else:
+                                back_counter += 1
+
+                        except NoSuchElementException:
+                            has_next_topic_page = False
+
+                    # end of loop
+                    for i in range(back_counter):
+                        driver.back()

                     # comment out
                     break

                 # comment out
-                #if count == 1:
-                # count = 0
-                # break
+                # if count == 1:
+                # count = 0
+                # break

-                try:
-                    temp = driver.find_element(by=By.XPATH, value=
-                    '/html/body/main/div/div/div/div[4]/div/div[1]/div/ul/')
-                    link = temp.find_element(by=By.CLASS_NAME, value='ipsPagination_next').get_attribute('href')
+                try: # change depending on web page, #next page
+                    temp = driver.find_element(by=By.XPATH, value='/html/body/main/div/div/div/div[4]/div/div[1]/div')#/html/body/main/div/div/div/div[4]/div/div[1]/div
+                    temp2 = temp.find_element(by=By.CLASS_NAME, value='ipsPagination')
+                    link = temp2.find_element(by=By.CLASS_NAME, value='ipsPagination_next').get_attribute('href')

                     if link == "":
                         raise NoSuchElementException

@ -300,7 +300,7 @@ def isListingLink(url):

 def topicPages(html):
     soup = BeautifulSoup(html, "html.parser")
     #print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr',{"class": "inline_row"}).find('strong').text)
-    return cryptBB_links_parser(soup)
+    return crackingPro_links_parser(soup)

 def crawler():


Forums/CrackingPro/parser.py (+8, -6)

@ -334,20 +334,22 @@ def onniForums_listing_parser(soup):
 #return organizeTopics("TheMajesticGarden", nm, topic, board, view, post, user, addDate, href)

-def cryptBB_links_parser(soup):
+def crackingPro_links_parser(soup):

     # Returning all links that should be visited by the Crawler
     href = []

     #print(soup.find('table', {"class": "tborder clear"}).find(
     # 'tbody').find_all('tr', {"class": "inline_row"}))

-    listing = soup.find('table', {"class": "tborder clear"}).find('tbody').find_all('tr', {"class": "inline_row"})
+    listing = soup.find('ol', {"data-role": "tableRows"}).find_all('div', {"class": "ipsDataItem_main"})

     for a in listing:
-        try:
-            link = a.find('span', {"class": "subject_old"}).find('a').get('href')
-        except:
-            link = a.find('span', {"class": "subject_new"}).find('a').get('href')
+        #try:
+        link = a.find('span', {"class": "ipsType_break ipsContained"}).find('a').get('href')
+        #except:
+        # link = a.find('span', {"class": "subject_new"}).find('a').get('href')

         href.append(link)
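
Note: crackingPro_links_parser now targets Invision Power Services (IPS) markup instead of the old MyBB-style table. The small, self-contained illustration below shows what those selectors match; the HTML fragment is invented for the example and is not taken from the site.

from bs4 import BeautifulSoup

sample = '''
<ol data-role="tableRows">
  <li class="ipsDataItem">
    <div class="ipsDataItem_main">
      <span class="ipsType_break ipsContained">
        <a href="http://example.onion/topic/1-placeholder/">placeholder topic</a>
      </span>
    </div>
  </li>
</ol>
'''

soup = BeautifulSoup(sample, "html.parser")
# Same calls as the new parser: the <ol data-role="tableRows"> holds one ipsDataItem_main per topic
listing = soup.find('ol', {"data-role": "tableRows"}).find_all('div', {"class": "ipsDataItem_main"})
for a in listing:
    link = a.find('span', {"class": "ipsType_break ipsContained"}).find('a').get('href')
    print(link)   # -> http://example.onion/topic/1-placeholder/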

Forums/CryptBB/crawler_selenium.py (+30, -28)

@ -12,17 +12,19 @@ from selenium.webdriver.firefox.service import Service
 from selenium.webdriver.common.by import By
 from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.support.ui import WebDriverWait
-from PIL import Image
+from PIL import Image

 import urllib.parse as urlparse
 import os, re, time
-from datetime import date
 import subprocess
+import configparser
 from bs4 import BeautifulSoup
 from Forums.Initialization.prepare_parser import new_parse
 from Forums.CryptBB.parser import cryptBB_links_parser
 from Forums.Utilities.utilities import cleanHTML

+config = configparser.ConfigParser()
+config.read('../../setup.ini')

 counter = 1
 baseURL = 'http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/'

@ -41,15 +43,14 @@ def startCrawling():
             print(driver.current_url, e)
     closetor(driver)

-    new_parse(forumName, False)
+    # new_parse(forumName, baseURL, False)


 # Opens Tor Browser
 def opentor():
     global pid
     print("Connecting Tor...")
-    path = open('../../path.txt').readline().strip()
-    pro = subprocess.Popen(path)
+    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
     pid = pro.pid
     time.sleep(7.5)
     input('Tor Connected. Press ENTER to continue\n')

@ -132,12 +133,9 @@ def closetor(driver):

 # Creates FireFox 'driver' and configure its 'Profile'
 # to use Tor proxy and socket
 def createFFDriver():
-    file = open('../../path.txt', 'r')
-    lines = file.readlines()
-    ff_binary = FirefoxBinary(lines[0].strip())
+    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

-    ff_prof = FirefoxProfile(lines[1].strip())
+    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
     ff_prof.set_preference("places.history.enabled", False)
     ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
     ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)

@ -145,7 +143,7 @@ def createFFDriver():
     ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
     ff_prof.set_preference("signon.rememberSignons", False)
     ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
-    ff_prof.set_preference("network.dns.disablePrefetch", True)#
+    ff_prof.set_preference("network.dns.disablePrefetch", True)
     ff_prof.set_preference("network.http.sendRefererHeader", 0)
     ff_prof.set_preference("permissions.default.image", 3)
     ff_prof.set_preference("browser.download.folderList", 2)

@ -159,7 +157,7 @@ def createFFDriver():
     ff_prof.set_preference("javascript.enabled", True)
     ff_prof.update_preferences()

-    service = Service(lines[2].strip())
+    service = Service(config.get('TOR', 'geckodriver_path'))

     driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

@ -170,10 +168,10 @@ def getAccess():
     url = getFixedURL()
     driver = createFFDriver()
     try:
-        driver.get(url)# open url in browser
+        driver.get(url)
         return driver
     except:
-        driver.close()# close tab
+        driver.close()
         return 'down'

@ -188,15 +186,12 @@ def savePage(page, url):

 # Gets the full path of the page to be saved along with its appropriate file name
 def getFullPathName(url):
+    from Forums.Initialization.forums_mining import CURRENT_DATE

     fileName = getNameFromURL(url)
     if isDescriptionLink(url):
-        fullPath = r'..\\CryptBB\\HTML_Pages\\' + str(
-            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
-            "%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html'
+        fullPath = r'..\\CryptBB\\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html'
     else:
-        fullPath = r'..\\CryptBB\\HTML_Pages\\' + str(
-            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
-            "%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html'
+        fullPath = r'..\\CryptBB\\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html'
     return fullPath

@ -204,7 +199,7 @@ def getFullPathName(url):

 def getNameFromURL(url):
     global counter
     name = ''.join(e for e in url if e.isalnum())
-    if (name == ''):
+    if name == '':
         name = str(counter)
         counter = counter + 1
     return name

@ -226,7 +221,7 @@ def getInterestedLinks():
     # # Training Challenges
     # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=96')
     # Darknet Discussions
-    #links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=88')
+    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=88')
     # # Public Leaks and Warez
     # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=97')
     # # Hacked Accounts and Database Dumps

@ -251,7 +246,7 @@ def crawlForum(driver):
         print('Crawling :', link)
         try:
             try:
-                driver.get(link)# open
+                driver.get(link)
             except:
                 driver.refresh()
             html = driver.page_source

@ -259,10 +254,17 @@ def crawlForum(driver):

             has_next_page = True

+            #loop through the topics
             while has_next_page:
-                list = topicPages(html)# for multiple pages
+                list = topicPages(html)
                 for item in list:
+                    itemURL = urlparse.urljoin(baseURL, str(item))
+                    try:
+                        driver.get(itemURL)
+                    except:
+                        driver.refresh()
+                    savePage(driver.page_source, item)
+                    driver.back()

+                    '''
                     #variable to check if there is a next page for the topic
                     has_next_topic_page = True
                     counter = 1

@ -291,9 +293,10 @@ def crawlForum(driver):
                         except NoSuchElementException:
                             has_next_topic_page = False

-                    #end of loop
+                    # end of loop
                     for i in range(counter):
                         driver.back()
+                    '''

                     # comment out
                     break

@ -302,7 +305,7 @@ def crawlForum(driver):
                     count = 0
                     break

-            try:# change depending on web page, #next page
+            try:
                 temp = driver.find_element(by=By.XPATH, value = '/html/body/div/div[2]/div/div[2]/div')
                 link = temp.find_element(by=By.CLASS_NAME, value='pagination_next').get_attribute('href')

@ -346,7 +349,6 @@ def isListingLink(url):

 # calling the parser to define the links
 def topicPages(html):
     soup = BeautifulSoup(html, "html.parser")
-    #print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr',{"class": "inline_row"}).find('strong').text)
     return cryptBB_links_parser(soup)


Forums/CryptBB/parser.py (+21, -36)

@ -15,15 +15,15 @@ def cryptBB_description_parser(soup):
     # Fields to be parsed

-    topic = "-1" # topic name
-    user = [] # all users of each post
-    addDate = [] # all dated of each post
-    feedback = [] # all feedbacks of each vendor (this was found in just one Forum and with a number format)
-    status = [] # all user's authority in each post such as (adm, member, dangerous)
-    reputation = [] # all user's karma in each post (usually found as a number)
-    sign = [] # all user's signature in each post (usually a standard message after the content of the post)
-    post = [] # all messages of each post
-    interest = [] # all user's interest in each post
+    topic = "-1" # 0 *topic name
+    user = [] # 1 *all users of each post
+    status = [] # 2 all user's authority in each post such as (adm, member, dangerous)
+    reputation = [] # 3 all user's karma in each post (usually found as a number)
+    interest = [] # 4 all user's interest in each post
+    sign = [] # 5 all user's signature in each post (usually a standard message after the content of the post)
+    post = [] # 6 all messages of each post
+    feedback = [] # 7 all feedbacks of each vendor (this was found in just one Forum and with a number format)
+    addDate = [] # 8 all dated of each post

     # Finding the topic (should be just one coming from the Listing Page)

@ -154,20 +154,6 @@ def cryptBB_description_parser(soup):

             feedback.append("-1")

-    '''
-    except:
-        if soup.find('td', {"class": "trow1"}).text == " You do not have permission to access this page. ":
-            user.append("-1")
-            status.append(-1)
-            interest.append(-1)
-            reputation.append(-1)
-            addDate.append(-1)
-            post.append("NO ACCESS TO THIS PAGE!")
-            sign.append(-1)
-            feedback.append(-1)
-    '''

     # Populate the final variable (this should be a list with all fields scraped)
     row = (topic, user, status, reputation, interest, sign, post, feedback, addDate)

@ -180,17 +166,17 @@ def cryptBB_description_parser(soup):

 def cryptBB_listing_parser(soup):

-    board = "-1" # board name (the previous level of the topic in the Forum categorization tree.
-                 # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
-    nm = 0 # this variable should receive the number of topics
-    topic = [] # all topics
-    author = [] # all authors of each topic
-    views = [] # number of views of each topic
-    posts = [] # number of posts of each topic
-    addDate = [] # when the topic was created (difficult to find)
-    href = [] # this variable should receive all cleaned urls (we will use this to do the marge between
-              # Listing and Description pages)
+    nm = 0 # *this variable should receive the number of topics
+    forum = "CryptBB" # 0 *forum name
+    board = "-1" # 1 *board name (the previous level of the topic in the Forum categorization tree.
+                 # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
+    topic = [] # 2 *all topics
+    author = [] # 3 *all authors of each topic
+    views = [] # 4 number of views of each topic
+    posts = [] # 5 number of posts of each topic
+    href = [] # 6 this variable should receive all cleaned urls (we will use this to do the marge between
+              # Listing and Description pages)
+    addDate = [] # 7 when the topic was created (difficult to find)

     # Finding the board (should be just one)

@ -223,7 +209,6 @@ def cryptBB_listing_parser(soup):
             link = itopic.find('span', {"class": "subject_old"}).find('a').get('href')
         except:
             link = itopic.find('span',{"class": "subject_new"}).find('a').get('href')
-        link = cleanLink(link)
         href.append(link)

         # Finding the author of the topic

@ -245,7 +230,7 @@ def cryptBB_listing_parser(soup):
             addDate.append("-1")

-    return organizeTopics("CryptBB", nm, topic, board, author, views, posts, href, addDate)
+    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate)


 def cryptBB_links_parser(soup):


Forums/DB_Connection/db_connection.py (+11, -19)

@ -2,29 +2,21 @@ __author__ = 'DarkWeb'
 import psycopg2
 import traceback
-import time
-from datetime import date
+import configparser

-'''
-user
-add
-forum_id
-user_reputation
-status_user
-interest_user
-post
-remove
-user_reputation
-status_user
-interest_user
-'''

 def connectDataBase():

     try:

-        return psycopg2.connect(host='localhost', user='postgres', password='password', dbname='darkweb_markets_forums')
+        config = configparser.ConfigParser()
+        config.read('../../setup.ini')
+        ip = config.get('PostgreSQL', 'ip')
+        username = config.get('PostgreSQL', 'username')
+        password = config.get('PostgreSQL', 'password')
+        database = config.get('PostgreSQL', 'database')
+        return psycopg2.connect(host=ip, user=username, password=password, dbname=database)

     except:

@ -211,7 +203,7 @@ def getLastPost(cur):
 '''

-def create_forum(cur, row):
+def create_forum(cur, row, url):

     forumId = verifyForum(cur, row[0])

@ -221,7 +213,7 @@ def create_forum(cur, row):
     sql = "Insert into forums (forum_id, name_forum, url_forum, dateinserted_forum) Values (%s, %s, %s, %s)"

-    recset = [forumId, row[0], None, row[8]]
+    recset = [forumId, row[0], url, row[8]]

     cur.execute(sql, recset)
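
Note: the pipeline now reads its credentials and paths from the new setup.ini (added by this commit but not shown in this view). The sketch below is hypothetical: the section and key names come from the config.get(...) calls in the diff, the PostgreSQL values mirror the hard-coded ones removed above, and the [TOR] paths are placeholders.

import configparser

SAMPLE_INI = """
[TOR]
firefox_binary_path = C:\\Users\\you\\Desktop\\Tor Browser\\Browser\\firefox.exe
firefox_profile_path = C:\\Users\\you\\Desktop\\Tor Browser\\Browser\\TorBrowser\\Data\\Browser\\profile.default
geckodriver_path = C:\\Users\\you\\Desktop\\geckodriver.exe

[PostgreSQL]
ip = localhost
username = postgres
password = password
database = darkweb_markets_forums
"""

config = configparser.ConfigParser()
config.read_string(SAMPLE_INI)                # the crawlers call config.read('../../setup.ini') instead
print(config.get('PostgreSQL', 'database'))   # -> darkweb_markets_forums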


Forums/Initialization/forums_mining.py (+8, -9)

@ -9,13 +9,15 @@ from datetime import *
 from Forums.BestCardingWorld.crawler_selenium import crawler as crawlerBestCardingWorld
 from Forums.CryptBB.crawler_selenium import crawler as crawlerCryptBB
 from Forums.OnniForums.crawler_selenium import crawler as crawlerOnniForums
-#from Forums.CrackingPro.crawler_selenium import crawler as crawlerCrackingPro
+# from Forums.CrackingPro.crawler_selenium import crawler as crawlerCrackingPro

 import time

+CURRENT_DATE = str("%02d" % date.today().month) + str("%02d" % date.today().day) + str("%04d" % date.today().year)

 # reads list of marketplaces manually inputted
 def getForums():

@ -33,8 +35,6 @@ def createDirectory(forum):
         pagesMainDir = '../' + forum
     else:
         pagesMainDir = '../' + forum + "/HTML_Pages"
-    # sharedFolderPath = r'\\VBoxSvr\VM_Files_(shared)'
-    # pagesMainDir = os.path.join(sharedFolderPath, 'HTML/Forums/' + forum + '/HTML_Pages')

     if not os.path.isdir(pagesMainDir):
         os.makedirs(pagesMainDir)

@ -61,7 +61,7 @@ def createRedditsSubdirectories(pagesMainDir):

 def createSubdirectories(pagesDir):

-    currentDateDir = pagesDir + '/' + str("%02d" %date.today().month) + str("%02d" %date.today().day) + str("%04d" %date.today().year)
+    currentDateDir = pagesDir + '/' + CURRENT_DATE

     if not os.path.isdir(currentDateDir):
         os.mkdir(currentDateDir)

@ -82,19 +82,19 @@ def createSubdirectories(pagesDir):
         os.mkdir(descReadDir)

-#main method
+# main method
 if __name__ == '__main__':

-    #assignment from forumsList.txt
+    # assignment from forumsList.txt
     forumsList = getForums()

-    #get forum from forumsList
+    # get forum from forumsList
     for forum in forumsList:
         forum = forum.replace('\n','')

         print("Creating listing and description directories ... for " + forum)
         createDirectory(forum)
-        time.sleep(5) #wait for directories to be created
+        time.sleep(5) # wait for directories to be created
         input("Directories created successfully. Press ENTER to continue\n")

@ -107,7 +107,6 @@ if __name__ == '__main__':
     # elif forum == "CrackingPro":
     #     crawlerCrackingPro()

     print("Scraping process completed successfully!")


Forums/Initialization/geckodriver.log (+118, -0)

@ -4318,6 +4318,7 @@ JavaScript error: resource://gre/modules/ExtensionTelemetry.jsm, line 109: Error
JavaScript error: resource://gre/modules/ExtensionTelemetry.jsm, line 113: Error: TelemetryStopwatch: finishing nonexisting stopwatch. Histogram: "WEBEXT_CONTENT_SCRIPT_INJECTION_MS_BY_ADDONID", key: "{73a6fe31-595d-460b-a920-fcc0f8843232}"
JavaScript error: resource://gre/modules/ExtensionTelemetry.jsm, line 109: Error: TelemetryStopwatch: finishing nonexisting stopwatch. Histogram: "WEBEXT_CONTENT_SCRIPT_INJECTION_MS", key: ""
JavaScript error: resource://gre/modules/ExtensionTelemetry.jsm, line 113: Error: TelemetryStopwatch: finishing nonexisting stopwatch. Histogram: "WEBEXT_CONTENT_SCRIPT_INJECTION_MS_BY_ADDONID", key: "{73a6fe31-595d-460b-a920-fcc0f8843232}"
<<<<<<< HEAD
>>>>>>> f804c1eb5950e89e7c55aa9044560646cbd911ee
1686782889434 geckodriver INFO Listening on 127.0.0.1:55993
1686782894019 mozrunner::runner INFO Running command: "C:\\Users\\minhkhoitran\\Desktop\\Tor Browser\\Browser\\firefox.exe" "--marionette" "--remote-debugging-port" "55994" "--remote-allow-hosts" "localhost" "-no-remote" "-profile" "C:\\Users\\MINHKH~1\\AppData\\Local\\Temp\\rust_mozprofileOgkkAM"
@ -4352,11 +4353,25 @@ start@resource://gre/modules/TorProcess.jsm:81:12
console.error: TorMonitorService:
Tor not running, not starting to monitor it.
1686782895293 RemoteAgent WARN TLS certificate errors will be ignored for this session
=======
1687240079948 geckodriver INFO Listening on 127.0.0.1:50448
1687240084735 mozrunner::runner INFO Running command: "C:\\Users\\calsyslab\\Desktop\\Tor Browser\\Browser\\firefox.exe" "--marionette" "--remote-debugging-port" "50449" "--remote-allow-hosts" "localhost" "-no-remote" "-profile" "C:\\Users\\CALSYS~1\\AppData\\Local\\Temp\\rust_mozprofileuYe2AP"
console.log: "TorSettings: loadFromPrefs()"
console.log: "TorConnect: init()"
console.log: "TorConnect: Entering Initial state"
console.log: "TorConnect: Observed profile-after-change"
console.log: "TorConnect: Observing topic 'TorProcessExited'"
console.log: "TorConnect: Observing topic 'TorLogHasWarnOrErr'"
console.log: "TorConnect: Observing topic 'torsettings:ready'"
console.log: "TorSettings: Observed profile-after-change"
1687240085868 Marionette INFO Marionette enabled
>>>>>>> d30c8066e307536b5e951ec07a15f08833074d5e
console.log: "TorConnect: Will load after bootstrap => [about:blank]" console.log: "TorConnect: Will load after bootstrap => [about:blank]"
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory. JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory. JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory. JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory. JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
<<<<<<< HEAD
JavaScript error: resource://gre/modules/XPCOMUtils.jsm, line 161: TypeError: Cc[aContract] is undefined
DevTools listening on ws://localhost:55994/devtools/browser/705394f9-2ee0-40c2-afa4-edbb645f03a8
JavaScript error: resource://gre/modules/PromiseWorker.jsm, line 106: Error: Could not get children of file(C:\Users\MINHKH~1\AppData\Local\Temp\rust_mozprofileOgkkAM\thumbnails) because it does not exist
@ -4397,11 +4412,63 @@ console.error: TorMonitorService:
Tor not running, not starting to monitor it.
1686853973687 RemoteAgent WARN TLS certificate errors will be ignored for this session
console.log: "TorConnect: Will load after bootstrap => [about:blank]"
=======
console.error: "Could not load engine [email protected]: Error: Extension is invalid"
JavaScript error: resource://gre/modules/XPCOMUtils.jsm, line 161: TypeError: Cc[aContract] is undefined
DevTools listening on ws://localhost:50449/devtools/browser/e85e6865-1f97-480a-8e46-778271184a87
1687240090364 Marionette INFO Listening on port 50454
1687240090846 RemoteAgent WARN TLS certificate errors will be ignored for this session
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/, line 2: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/member.php?action=login, line 2: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/member.php?action=login, line 5: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/member.php?action=login, line 9: ReferenceError: use_xmlhttprequest is not defined
JavaScript error: resource://gre/modules/PromiseWorker.jsm, line 106: Error: Could not get children of file(C:\Users\calsyslab\AppData\Local\Temp\rust_mozprofileuYe2AP\thumbnails) because it does not exist
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=86, line 3: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/inline_edit.js?ver=1808, line 6: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628, line 6: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/report.js?ver=1804, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/thread.js?ver=1809, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628, line 19: ReferenceError: use_xmlhttprequest is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628, line 25: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628&page=2, line 6: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/report.js?ver=1804, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/thread.js?ver=1809, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628&page=2, line 19: ReferenceError: use_xmlhttprequest is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628&page=2, line 25: ReferenceError: $ is not defined
1687240218310 Marionette INFO Stopped listening on port 50454
JavaScript error: resource:///modules/Interactions.jsm, line 209: NS_ERROR_FAILURE: Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsIUserIdleService.removeIdleObserver]
!!! error running onStopped callback: TypeError: callback is not a function
JavaScript error: resource:///modules/sessionstore/SessionFile.jsm, line 375: Error: _initWorker called too early! Please read the session file from disk first.
JavaScript error: resource://gre/modules/PromiseWorker.jsm, line 106: Error: Could not get children of file(C:\Users\calsyslab\AppData\Local\Temp\rust_mozprofileuYe2AP\thumbnails) because it does not exist
1687240220095 RemoteAgent ERROR unable to stop listener: [Exception... "Component returned failure code: 0x8000ffff (NS_ERROR_UNEXPECTED) [nsIWindowMediator.getEnumerator]" nsresult: "0x8000ffff (NS_ERROR_UNEXPECTED)" location: "JS frame :: chrome://remote/content/cdp/observers/TargetObserver.jsm :: stop :: line 64" data: no] Stack trace: stop()@TargetObserver.jsm:64
unwatchForTabs()@TargetList.jsm:70
unwatchForTargets()@TargetList.jsm:37
destructor()@TargetList.jsm:109
stop()@CDP.jsm:104
close()@RemoteAgent.jsm:138
1687240311209 geckodriver INFO Listening on 127.0.0.1:50519
1687240315070 mozrunner::runner INFO Running command: "C:\\Users\\calsyslab\\Desktop\\Tor Browser\\Browser\\firefox.exe" "--marionette" "--remote-debugging-port" "50520" "--remote-allow-hosts" "localhost" "-no-remote" "-profile" "C:\\Users\\CALSYS~1\\AppData\\Local\\Temp\\rust_mozprofiletzrkDs"
console.log: "TorSettings: loadFromPrefs()"
console.log: "TorConnect: init()"
console.log: "TorConnect: Entering Initial state"
console.log: "TorConnect: Observed profile-after-change"
console.log: "TorConnect: Observing topic 'TorProcessExited'"
console.log: "TorConnect: Observing topic 'TorLogHasWarnOrErr'"
console.log: "TorConnect: Observing topic 'torsettings:ready'"
console.log: "TorSettings: Observed profile-after-change"
1687240315958 Marionette INFO Marionette enabled
console.log: "TorConnect: Will load after bootstrap => [about:blank]"
console.error: "Could not load engine [email protected]: Error: Extension is invalid"
>>>>>>> d30c8066e307536b5e951ec07a15f08833074d5e
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XPCOMUtils.jsm, line 161: TypeError: Cc[aContract] is undefined
<<<<<<< HEAD
DevTools listening on ws://localhost:50008/devtools/browser/15543a67-b1fe-427a-8516-16fe520676d3
console.warn: LoginRecipes: "getRecipes: falling back to a synchronous message for:" "http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion"
console.warn: LoginRecipes: "getRecipes: falling back to a synchronous message for:" "http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion"
@ -4620,3 +4687,54 @@ JavaScript error: resource://gre/modules/PromiseWorker.jsm, line 106: Error: Cou
JavaScript error: resource://gre/modules/AsyncShutdown.jsm, line 575: uncaught exception: SessionFileInternal.getWriter() called too early! Please read the session file from disk first.
JavaScript error: resource://gre/modules/PromiseWorker.jsm, line 106: Error: Could not get children of file(C:\Users\MINHKH~1\AppData\Local\Temp\rust_mozprofilexDqUwM\thumbnails) because it does not exist
[Parent 7372, IPC I/O Parent] WARNING: pipe error: 232: file /var/tmp/build/firefox-cc1bd3d61c87/ipc/chromium/src/chrome/common/ipc_channel_win.cc:554
=======
DevTools listening on ws://localhost:50520/devtools/browser/4b6276ea-c420-4b6d-b4bc-fda679f97800
1687240317156 Marionette INFO Listening on port 50525
1687240317256 RemoteAgent WARN TLS certificate errors will be ignored for this session
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/, line 2: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/member.php?action=login, line 2: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/member.php?action=login, line 5: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/member.php?action=login, line 9: ReferenceError: use_xmlhttprequest is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=86, line 3: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/inline_edit.js?ver=1808, line 6: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628, line 6: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/report.js?ver=1804, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/thread.js?ver=1809, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628, line 19: ReferenceError: use_xmlhttprequest is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=2628, line 25: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=86, line 3: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/inline_edit.js?ver=1808, line 6: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=86&page=2, line 3: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/inline_edit.js?ver=1808, line 6: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=16404, line 6: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/report.js?ver=1804, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/thread.js?ver=1809, line 4: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=16404, line 19: ReferenceError: use_xmlhttprequest is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/showthread.php?tid=16404, line 25: ReferenceError: $ is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=86&page=2, line 3: ReferenceError: lang is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/jeditable/jeditable.min.js, line 38: ReferenceError: jQuery is not defined
JavaScript error: http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/jscripts/inline_edit.js?ver=1808, line 6: ReferenceError: $ is not defined
1687240409940 Marionette INFO Stopped listening on port 50525
JavaScript error: resource:///modules/Interactions.jsm, line 209: NS_ERROR_FAILURE: Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsIUserIdleService.removeIdleObserver]
###!!! [Parent][MessageChannel] Error: (msgtype=0x140007,name=PBackgroundLSDatabase::Msg_RequestAllowToClose) Channel error: cannot send/recv
[Parent 1036, IPC I/O Parent] WARNING: file /var/tmp/build/firefox-b6010b1466c9/ipc/chromium/src/base/process_util_win.cc:167
!!! error running onStopped callback: TypeError: callback is not a function
JavaScript error: resource:///modules/sessionstore/SessionFile.jsm, line 375: Error: _initWorker called too early! Please read the session file from disk first.
JavaScript error: resource://gre/modules/PromiseWorker.jsm, line 106: Error: Could not get children of file(C:\Users\calsyslab\AppData\Local\Temp\rust_mozprofiletzrkDs\thumbnails) because it does not exist
###!!! [Child][MessageChannel] Error: (msgtype=0x5D0005,name=PImageBridge::Msg_WillClose) Channel error: cannot send/recv
1687240410572 RemoteAgent ERROR unable to stop listener: [Exception... "Component returned failure code: 0x8000ffff (NS_ERROR_UNEXPECTED) [nsIWindowMediator.getEnumerator]" nsresult: "0x8000ffff (NS_ERROR_UNEXPECTED)" location: "JS frame :: chrome://remote/content/cdp/observers/TargetObserver.jsm :: stop :: line 64" data: no] Stack trace: stop()@TargetObserver.jsm:64
unwatchForTabs()@TargetList.jsm:70
unwatchForTargets()@TargetList.jsm:37
destructor()@TargetList.jsm:109
stop()@CDP.jsm:104
close()@RemoteAgent.jsm:138
>>>>>>> d30c8066e307536b5e951ec07a15f08833074d5e

Forums/Initialization/prepare_parser.py (+18, -23)

@ -9,7 +9,7 @@ from Forums.BestCardingWorld.parser import *
 from Forums.CryptBB.parser import *
 from Forums.Classifier.classify_product import predict
-#from DarkWebMining_Sample.Forums.Classifier.classify_product import predict_semi
+# from DarkWebMining_Sample.Forums.Classifier.classify_product import predict_semi

 # determines if forum is russian, not really used now but maybe later

@ -62,9 +62,9 @@ def getPosts(posts):

 #uses db connection , another program, methods to persists values to the correct categories
 #@param: row is the list of entries for this instance, cur is the db connection object
-def persist_data(row, cur):
+def persist_data(url, row, cur):

-    forum = create_forum(cur, row)
+    forum = create_forum(cur, row, url)

     board = create_board(cur, row, forum)

@ -77,15 +77,13 @@ def persist_data(row, cur):

 #main method for this program, what actually gets the parsed info from the parser, and persists them into the db
 #calls the different parser methods here depending on the type of html page
-def new_parse(forum, createLog):
+def new_parse(forum, url, createLog):

-    print("Parsing The " + forum + " Forum and conduct data classification to store the information in the database.")
-    crawlerDate = date.today()
+    from Forums.Initialization.forums_mining import CURRENT_DATE

-    ini = time.time()
+    print("Parsing The " + forum + " Forum and conduct data classification to store the information in the database.")

-    global site
+    # ini = time.time()

     # Connecting to the database
     con = connectDataBase()

@ -96,27 +94,26 @@ def new_parse(forum, createLog):

     nError = 0

-    lines = [] #lines.clear()
-    lns = [] #lns.clear()
+    lines = [] # listing pages
+    lns = [] # description pages
     detPage = {}
-    rw = []

     # Creating the log file for each Forum
     if createLog:
-        if not os.path.exists("./" + forum + "/Logs/" + forum + "_" + str("%02d" %crawlerDate.month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + ".log"):
-            logFile = open("./" + forum + "/Logs/" + forum + "_" + str("%02d" %crawlerDate.today().month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + ".log", "w")
+        if not os.path.exists("./" + forum + "/Logs/" + forum + "_" + CURRENT_DATE + ".log"):
+            logFile = open("./" + forum + "/Logs/" + forum + "_" + CURRENT_DATE + ".log", "w")
         else:
-            print("Files of the date " + str("%02d" %crawlerDate.today().month) + str("%02d" %crawlerDate.today().day) + str("%04d" %crawlerDate.today().year) +
-                  " from the Forum " + forum + " were already read. Delete the referent information in the Data Base and also delete the log file "
-                  "in the _Logs folder to read files from this Forum of this date again.")
+            print("Files of the date " + CURRENT_DATE + " from the Forum " + forum +
+                  " were already read. Delete the referent information in the Data Base and also delete the log file"
+                  " in the _Logs folder to read files from this Forum of this date again.")
             raise SystemExit

     # Reading the Listing Html Pages
-    for fileListing in glob.glob(os.path.join (os.getcwd().replace("initialization","") + forum + "\\HTML_Pages\\" + str("%02d" %crawlerDate.month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + "\\Listing" ,'*.html')):
+    for fileListing in glob.glob(os.path.join("..\\" + forum + "\\HTML_Pages\\" + CURRENT_DATE + "\\Listing", '*.html')):
         lines.append(fileListing)

     # Reading the Description Html Pages
-    for fileDescription in glob.glob(os.path.join (os.getcwd().replace("initialization","") + forum + "\\HTML_Pages\\" + str("%02d" %crawlerDate.month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + "\\Description" ,'*.html')):
+    for fileDescription in glob.glob(os.path.join("..\\" + forum + "\\HTML_Pages\\" + CURRENT_DATE + "\\Description" ,'*.html')):
         lns.append(fileDescription)

     # Parsing the Description Pages and put the tag's content into a dictionary (Hash table)

@ -218,9 +215,7 @@ def new_parse(forum, createLog):

             rec = rec.split(',')

             # key = u"Top:" + rec[1].upper().strip() + u" User:" + rec[5].upper().strip()
-            # key = rec[16]
-            url = ''.join(e for e in rec[6] if e.isalnum())
-            key = u"Url:" + url
+            key = u"Url:" + cleanLink(rec[6])

             if key in detPage:

@ -237,7 +232,7 @@ def new_parse(forum, createLog):

             # Persisting the information in the database
             try:
-                persist_data(tuple(rec), cur)
+                persist_data(url, tuple(rec), cur)
                 con.commit()
             except:


+ 1
- 1
Forums/Utilities/utilities.py View File

@ -160,7 +160,7 @@ def cleanLink(originalLink):
return originalLink return originalLink
def organizeTopics(forum, nm, topic, board, author, views, posts, href, addDate):
def organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate):
day = time.strftime("%m/%d/%Y") day = time.strftime("%m/%d/%Y")
ahora = time.strftime("%I:%M:%S") ahora = time.strftime("%I:%M:%S")
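The only change here reorders board, author and topic in the organizeTopics signature. Because callers pass these positionally, a reorder like this silently scrambles columns if any call site is missed; a small illustration (hypothetical function name and values) of why keyword arguments are the safer way to call it:

def organize_topics_demo(forum, nm, board, author, topic, views, posts, href, addDate):
    # Same parameter order as the new signature; just echoes the fields back.
    return (forum, nm, board, author, topic, views, posts, href, addDate)

# Keyword arguments keep the call correct even if the positional order changes again.
row = organize_topics_demo(forum="CryptBB", nm=1, board=["Hacking"], author=["user1"],
                           topic=["Example topic"], views=["10"], posts=["2"],
                           href=["/thread-1"], addDate=["06/20/2023"])
print(row)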


+ 109
- 77
MarketPlaces/DB_Connection/db_connection.py View File

@ -2,33 +2,21 @@ __author__ = 'DarkWeb'
import psycopg2 import psycopg2
import traceback import traceback
import time
from datetime import date
'''
vendors
add
mk_id
vendor_rating
successful_transactions
products
add
product_rating
remove
date_item
escrow_info
last_viewed
vendor_rating
successful_transaction
'''
import configparser
def connectDataBase(): def connectDataBase():
try: try:
return psycopg2.connect(host='localhost', user='postgres', password='password', dbname='darkweb_markets_forums')
config = configparser.ConfigParser()
config.read('../../setup.ini')
ip = config.get('PostgreSQL', 'ip')
username = config.get('PostgreSQL', 'username')
password = config.get('PostgreSQL', 'password')
database = config.get('PostgreSQL', 'database')
return psycopg2.connect(host=ip, user=username, password=password, dbname=database)
except: except:
@ -40,7 +28,7 @@ def verifyMarketPlace(cur, nameMarket):
try: try:
cur.execute("select id_mk from marketPlaces where name_mk = %(nameMarket)s limit 1", {'nameMarket': nameMarket})
cur.execute("select market_id from marketPlaces where name_market = %(nameMarket)s limit 1", {'nameMarket': nameMarket})
recset = cur.fetchall() recset = cur.fetchall()
@ -55,11 +43,12 @@ def verifyMarketPlace(cur, nameMarket):
print (trace) print (trace)
def verifyVendor(cur, nameVendor):
def verifyVendor(cur, nameVendor, marketId):
try: try:
cur.execute("select id_ve from vendors where name_ve = %(nameVendor)s limit 1", {'nameVendor': nameVendor})
cur.execute("select vendor_id from vendors where name_vendor = %(nameVendor)s and market_id = %(marketId)s "
"limit 1", {'nameVendor': nameVendor, 'marketId': marketId})
recset = cur.fetchall() recset = cur.fetchall()
@ -78,7 +67,7 @@ def getLastMarketPlace(cur):
try: try:
cur.execute("select id_mk from marketPlaces order by id_mk desc limit 1")
cur.execute("select market_id from marketPlaces order by market_id desc limit 1")
recset = cur.fetchall() recset = cur.fetchall()
@ -97,7 +86,7 @@ def getLastVendor(cur):
try: try:
cur.execute("select id_ve from vendors order by id_ve desc limit 1")
cur.execute("select vendor_id from vendors order by vendor_id desc limit 1")
recset = cur.fetchall() recset = cur.fetchall()
@ -112,56 +101,72 @@ def getLastVendor(cur):
print (trace) print (trace)
def create_marketPlace(cur, row):
def create_marketPlace(cur, row, url):
marketPlace = verifyMarketPlace(cur, row[0])
marketId = verifyMarketPlace(cur, row[0])
if not marketPlace:
marketPlace = int(getLastMarketPlace(cur) + 1)
if not marketId:
marketId = int(getLastMarketPlace(cur) + 1)
sql = "Insert into marketPlaces (id_mk, name_mk, url_mk, dateInserted) Values (%s, %s, %s, %s)"
sql = "Insert into marketplaces (market_id, name_market, url_market, dateinserted_market) " \
"Values (%s, %s, %s, %s)"
recset = [marketPlace, row[0], None, time.asctime()]
recset = [marketId, row[0], url, row[21]]
cur.execute(sql, recset) cur.execute(sql, recset)
return marketPlace
return marketId
def create_vendor(cur, row):
def create_vendor(cur, row, marketId):
vendor = verifyVendor(cur, row[18])
vendorId = verifyVendor(cur, row[1], marketId)
if not vendor:
vendor = int(getLastVendor(cur) + 1)
if not vendorId:
vendorId = int(getLastVendor(cur) + 1)
sql = "Insert into vendors (id_ve, name_ve, dateInserted) Values (%s, %s, %s)"
sql = "Insert into vendors (vendor_id, market_id, name_vendor, rating_vendor, successfultransactions_vendor, dateinserted_vendor) Values (%s, %s, %s, %s, %s, %s)"
recset = [vendor, row[18], time.asctime()]
recset = [vendorId, marketId,
row[1],
row[2] if row[2] != '-1' else None,
row[3] if row[3] != '-1' else None,
row[21]]
cur.execute(sql, recset) cur.execute(sql, recset)
return vendor
def create_items(cur, row, marketPlace, vendor):
sql = "Insert into items (id_mk, id_ve, date_item, name_item, description_item, cve_item, ms_item, category_item, " \
"escrowinfo_item, views_item, reviews_item, addeddate_item, lastvieweddate_item, btcsellingprice_item, usdsellingprice_item, " \
"eurosellingprice_item, quantitysold_item, quantityleft_item, shippedfrom_item, shippedto_item, vendorrating, successfulltransactions, " \
"termsandconditions, dateinserted_item, classification_item) Values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, " \
"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
recset = [marketPlace, vendor, str("%02d" %date.today().month) + "/" + str("%02d" %date.today().day) + "/" + str("%04d" %date.today().year),
#recset = [marketPlace, vendor, str("%02d" %date.today().day) + "/" + str("%02d" %date.today().month) + "/" + str("%04d" %date.today().year),
row[1] if row[1]!= '-1' else None, row[5] if row[5]!= '-1' else None, row[2] if row[2]!= '-1' else None,
row[3] if row[3]!= '-1' else None, row[4] if row[4]!= '-1' else None, row[6] if row[6]!= '-1' else None,
row[7] if row[7]!= '-1' else None, row[8] if row[8]!= '-1' else None, row[9] if row[9]!= '-1' else None,
row[10] if row[10]!= '-1' else None, row[11] if row[11]!= '-1' else None, row[12] if row[12]!= '-1' else None,
row[13] if row[13]!= '-1' else None, row[14] if row[14]!= '-1' else None, row[15] if row[15]!= '-1' else None,
row[16] if row[16]!= '-1' else None, row[17] if row[17]!= '-1' else None, row[19] if row[19]!= '-1' else None,
row[20] if row[20]!= '-1' else None, row[21] if row[21]!= '-1' else None, row[22] if row[22]!= '-1' else None,
row[24] if row[24]!= '-1' else None]
return vendorId
def create_items(cur, row, marketId, vendorId):
sql = "Insert into items (market_id, vendor_id, name_item, description_item, cve_item, ms_item, category_item, " \
"views_item, reviews_item, rating_item, dateadded_item, btc_item, usd_item, euro_item, quantitysold_item, " \
"quantityleft_item, shippedfrom_item, shippedto_item, href_item, lastseen_item, dateinserted_item, " \
"classification_item) Values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, " \
"%s, %s, %s)"
recset = [marketId, vendorId,
row[4],
row[5] if row[5] != '-1' else None,
row[6] if row[6] != '-1' else None,
row[7] if row[7] != '-1' else None,
row[8] if row[8] != '-1' else None,
row[9] if row[9] != '-1' else None,
row[10] if row[10] != '-1' else None,
row[11] if row[11] != '-1' else None,
row[12] if row[12] != '-1' else None,
row[13] if row[13] != '-1' else None,
row[14] if row[14] != '-1' else None,
row[15] if row[15] != '-1' else None,
row[16] if row[16] != '-1' else None,
row[17] if row[17] != '-1' else None,
row[18] if row[18] != '-1' else None,
row[19] if row[19] != '-1' else None,
row[20] if row[20] != '-1' else None,
row[21],
row[21],
row[22]]
cur.execute(sql, recset) cur.execute(sql, recset)
@ -170,26 +175,53 @@ def create_database(cur, con):
try: try:
sql = "create table marketplaces(id_mk integer not null, name_mk character varying(255) not null, " \
"url_mk character varying(255) null, dateinserted timestamp(6) with time zone not null, " \
"constraint pk_mk primary key (id_mk))"
sql = "create table marketplaces(market_id integer not null, name_market character varying(255) not null, " \
"url_market character varying(255) null, dateinserted_market timestamp(6) with time zone not null, " \
"constraint markets_pk primary key (market_id))"
cur.execute(sql)
sql = "create table vendors(vendor_id integer not null, market_id integer not null, name_vendor character " \
"varying(255) not null, rating_vendor character varying(255), successfultransactions_vendor integer " \
"null, dateinserted_vendor timestamp(6) with time zone not null, constraint vendors_pk primary key (" \
"vendor_id), constraint vendors_market_id_fkey foreign key (market_id) references marketplaces (" \
"market_id))"
cur.execute(sql)
sql = "create table vendors_history(vendor_id integer not null, market_id integer not null, name_vendor " \
"character varying(255) not null, rating_vendor character varying(255), successfultransactions_vendor " \
"integer null, dateinserted_vendor timestamp(6) with time zone not null, constraint vendors_history_pk " \
"primary key (vendor_id, dateinserted_vendor), constraint vendors_history_vendor_id_fkey foreign key (" \
"vendor_id) references vendors (vendor_id), constraint vendors_history_market_id_fkey foreign key (" \
"market_id) references marketplaces (market_id))"
cur.execute(sql) cur.execute(sql)
sql = "create table vendors(id_ve integer not null, name_ve character varying(255) not null, " \
"dateinserted timestamp(6) with time zone not null, constraint pk_ve primary key (id_ve))"
sql = "create table items(market_id integer not null, vendor_id integer not null, name_item character " \
"varying(255) not null, description_item character varying(1000000) null, cve_item character varying(" \
"255) null, ms_item character varying(255) null, category_item character varying(255) null, views_item " \
"integer null, reviews_item integer null, rating_item character varying(255) null, dateadded_item " \
"character varying(25) null, btc_item character varying(255) null, usd_item character varying(255) " \
"null, euro_item character varying(255) null, quantitysold_item integer null, quantityleft_item " \
"character varying(255) null, shippedfrom_item character varying(255) null, shippedto_item character " \
"varying(255) null, href_item character varying(255) null, lastseen_item timestamp(6) with time zone " \
"not null, dateinserted_item timestamp(6) with time zone not null, classification_item double " \
"precision not null, constraint items_pk primary key (market_id, vendor_id, name_item), constraint " \
"items_market_id_fkey foreign key (market_id) references marketplaces (market_id),constraint " \
"items_vendor_id_fkey foreign key (vendor_id) references vendors (vendor_id))"
cur.execute(sql) cur.execute(sql)
sql = "create table items(id_mk integer not null, id_ve integer not null, date_item date not null, name_item character varying(255) not null, " \
"description_item character varying(1000000) null, cve_item character varying(255) null, ms_item character varying(255) null, " \
"category_item character varying(255) null, escrowinfo_item character varying(1000) null, views_item integer null, " \
"reviews_item character varying(255) null, addeddate_item character varying(25) null, " \
"lastvieweddate_item character varying(25) null, btcsellingprice_item character varying(255) null, " \
"usdsellingprice_item character varying(255) null, eurosellingprice_item character varying(255) null, quantitysold_item integer null, " \
"quantityleft_item character varying(255) null, shippedfrom_item character varying(255) null, shippedto_item character varying(5000) null, " \
"vendorrating character varying(255) null, successfulltransactions character varying(500) null, " \
"termsandconditions character varying(15000) null, dateinserted_item timestamp(6) with time zone not null, " \
"classification_item double precision not null, constraint pk_items primary key (id_mk, id_ve, date_item, name_item), " \
"constraint items_id_mk_fkey foreign key (id_mk) references marketplaces (id_mk),constraint items_id_ve_fkey foreign key (id_ve) references vendors (id_ve))"
sql = "create table items_history(market_id integer not null, vendor_id integer not null, name_item character " \
"varying(255) not null, description_item character varying(1000000) null, cve_item character varying(" \
"255) null, ms_item character varying(255) null, category_item character varying(255) null, views_item " \
"integer null, reviews_item integer null, rating_item character varying(255) null, dateadded_item " \
"character varying(25) null, btc_item character varying(255) null, usd_item character varying(255) " \
"null, euro_item character varying(255) null, quantitysold_item integer null, quantityleft_item " \
"character varying(255) null, shippedfrom_item character varying(255) null, shippedto_item character " \
"varying(255) null, href_item character varying(255) null, lastseen_item timestamp(6) with time zone " \
"not null, dateinserted_item timestamp(6) with time zone not null, classification_item double " \
"precision not null, constraint items_history_pk primary key (market_id, vendor_id, name_item, " \
"dateinserted_item), constraint items_history_market_id_fkey foreign key (market_id) references " \
"marketplaces (market_id), constraint items_history_vendor_id_fkey foreign key (vendor_id) references " \
"vendors (vendor_id))"
cur.execute(sql) cur.execute(sql)
con.commit() con.commit()
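The connection settings now come from setup.ini instead of being hard-coded. A minimal, self-contained sketch of that pattern; the [PostgreSQL] section and key names match the diff, while the sample values in the comment are only illustrative:

import configparser
import psycopg2

def connect_from_ini(path='../../setup.ini'):
    # setup.ini is expected to contain something like:
    # [PostgreSQL]
    # ip = localhost
    # username = postgres
    # password = ...
    # database = darkweb_markets_forums
    config = configparser.ConfigParser()
    config.read(path)
    return psycopg2.connect(
        host=config.get('PostgreSQL', 'ip'),
        user=config.get('PostgreSQL', 'username'),
        password=config.get('PostgreSQL', 'password'),
        dbname=config.get('PostgreSQL', 'database'))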


+ 32
- 0
MarketPlaces/Initialization/geckodriver.log View File

@ -6073,3 +6073,35 @@ unwatchForTargets()@TargetList.jsm:37
destructor()@TargetList.jsm:109 destructor()@TargetList.jsm:109
stop()@CDP.jsm:104 stop()@CDP.jsm:104
close()@RemoteAgent.jsm:138 close()@RemoteAgent.jsm:138
1687245533907 geckodriver INFO Listening on 127.0.0.1:62051
1687245536832 mozrunner::runner INFO Running command: "C:\\Users\\calsyslab\\Desktop\\Tor Browser\\Browser\\firefox.exe" "--marionette" "--remote-debugging-port" "62052" "--remote-allow-hosts" "localhost" "-no-remote" "-profile" "C:\\Users\\CALSYS~1\\AppData\\Local\\Temp\\rust_mozprofileuMGaeY"
console.log: "TorSettings: loadFromPrefs()"
console.log: "TorConnect: init()"
console.log: "TorConnect: Entering Initial state"
console.log: "TorConnect: Observed profile-after-change"
console.log: "TorConnect: Observing topic 'TorProcessExited'"
console.log: "TorConnect: Observing topic 'TorLogHasWarnOrErr'"
console.log: "TorConnect: Observing topic 'torsettings:ready'"
console.log: "TorSettings: Observed profile-after-change"
1687245537956 Marionette INFO Marionette enabled
console.log: "TorConnect: Will load after bootstrap => [about:blank]"
console.error: "Could not load engine [email protected]: Error: Extension is invalid"
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XULStore.jsm, line 66: Error: Can't find profile directory.
JavaScript error: resource://gre/modules/XPCOMUtils.jsm, line 161: TypeError: Cc[aContract] is undefined
DevTools listening on ws://localhost:62052/devtools/browser/9cf17e56-2fb1-468d-b65e-15c4de4eaa64
1687245540759 Marionette INFO Listening on port 49935
1687245540897 RemoteAgent WARN TLS certificate errors will be ignored for this session
1687245639406 Marionette INFO Stopped listening on port 49935
JavaScript error: resource:///modules/Interactions.jsm, line 209: NS_ERROR_FAILURE: Component returned failure code: 0x80004005 (NS_ERROR_FAILURE) [nsIUserIdleService.removeIdleObserver]
!!! error running onStopped callback: TypeError: callback is not a function
JavaScript error: resource:///modules/sessionstore/SessionFile.jsm, line 375: Error: _initWorker called too early! Please read the session file from disk first.
JavaScript error: resource://gre/modules/PageThumbs.jsm, line 709: AbortError: IOUtils.profileBeforeChange getter: IOUtils: profileBeforeChange phase has already finished
1687245650576 RemoteAgent ERROR unable to stop listener: [Exception... "Component returned failure code: 0x8000ffff (NS_ERROR_UNEXPECTED) [nsIWindowMediator.getEnumerator]" nsresult: "0x8000ffff (NS_ERROR_UNEXPECTED)" location: "JS frame :: chrome://remote/content/cdp/observers/TargetObserver.jsm :: stop :: line 64" data: no] Stack trace: stop()@TargetObserver.jsm:64
unwatchForTabs()@TargetList.jsm:70
unwatchForTargets()@TargetList.jsm:37
destructor()@TargetList.jsm:109
stop()@CDP.jsm:104
close()@RemoteAgent.jsm:138

+ 1
- 1
MarketPlaces/Initialization/marketsList.txt View File

@ -1 +1 @@
Tor2door
Tor2door

+ 6
- 3
MarketPlaces/Initialization/markets_mining.py View File

@ -8,9 +8,12 @@ import os
from datetime import * from datetime import *
from MarketPlaces.DarkFox.crawler_selenium import crawler as crawlerDarkFox from MarketPlaces.DarkFox.crawler_selenium import crawler as crawlerDarkFox
from MarketPlaces.Tor2door.crawler_selenium import crawler as crawlerTor2door from MarketPlaces.Tor2door.crawler_selenium import crawler as crawlerTor2door
from MarketPlaces.ThiefWorld.crawler_selenium import crawler as crawlerThiefWorld
import time import time
CURRENT_DATE = str("%02d" % date.today().month) + str("%02d" % date.today().day) + str("%04d" % date.today().year)
# reads list of marketplaces # reads list of marketplaces
def getMarkets(): def getMarkets():
@ -25,12 +28,10 @@ def createDirectory(mkt):
# Package should already be there, holding crawler and parser # Package should already be there, holding crawler and parser
pagesDir = '../' + mkt + '/HTML_Pages' pagesDir = '../' + mkt + '/HTML_Pages'
# sharedFolderPath = r'\\VBoxSvr\VM_Files_(shared)'
# pagesDir = os.path.join(sharedFolderPath, 'HTML/MarketPlaces/' + mkt + '/HTML_Pages')
if not os.path.isdir(pagesDir): if not os.path.isdir(pagesDir):
os.makedirs(pagesDir) os.makedirs(pagesDir)
currentDateDir = pagesDir + '/' + str("%02d" %date.today().month) + str("%02d" %date.today().day) + str("%04d" %date.today().year)
currentDateDir = pagesDir + '/' + CURRENT_DATE
if not os.path.isdir(currentDateDir): if not os.path.isdir(currentDateDir):
os.mkdir(currentDateDir) os.mkdir(currentDateDir)
@ -66,5 +67,7 @@ if __name__ == '__main__':
crawlerDarkFox() crawlerDarkFox()
elif mkt == 'Tor2door': elif mkt == 'Tor2door':
crawlerTor2door() crawlerTor2door()
elif mkt == "ThiefWorld":
crawlerThiefWorld()
print("Scraping process completed successfully!") print("Scraping process completed successfully!")

+ 59
- 95
MarketPlaces/Initialization/prepare_parser.py View File

@ -1,4 +1,4 @@
__author__ = 'DarkWeb'
__author__ = 'Helium'
import glob import glob
import os import os
@ -18,128 +18,94 @@ def mergePages(rmm, rec):
print("----------------- Matched: " + rec[1] + "--------------------") print("----------------- Matched: " + rec[1] + "--------------------")
# if rec[1] == "-1": #Item_Name
# rec[1] = rmm[0]
rec[1] = rmm[0]
if rec[2] == "-1": #Item_CVE_Classification
rec[2] = rmm[4]
if rec[3] == "-1": #Item_MS_Classification
rec[3] = rmm[5]
if rec[4] == "-1": #Item_MarketCategory
rec[4] = rmm[7]
if rec[5] == "-1": #Item_Description
rec[5] = rmm[1]
elif rmm[1] != "-1":
rec[5] = rec[5] + " " + rmm[1]
if rec[6] == "-1": #Item _EscrowInfo
rec[6] = rmm[11]
#rec[7] = "-1" #Item__N.OfViews
if rec[8] == "-1": #Item_Reviews
rec[8] = rmm[6]
if rec[9] == "-1": #Item_AddedDate
rec[9] = rmm[15]
if rec[10] == "-1": #Item_LastViewedDate
rec[10] = rmm[2]
if rec[11] == "-1": #Item_BTC_SellingPrice
rec[11] = rmm[18]
if rec[12] == "-1": #Item_US_SellingPrice
rec[12] = rmm[19]
if rec[13] == "-1": #Item_EURO_SellingPrice
rec[13] = rmm[22]
if rec[14] == "-1": #Item_QuantitySold
rec[14] = rmm[14]
if rec[15] == "-1": #Item_QuantityLeft
rec[15] = rmm[10]
if rec[16] == "-1": #Item_ShippedFrom
rec[16] = rmm[8]
if rec[17] == "-1": #Item_ShippedTo
rec[17] = rmm[9]
if rec[18] == "-1": #Vendor_Name
rec[18] = rmm[13]
if rec[19] == "-1": #Vendor_Rating
rec[19] = rmm[20]
if rec[20] == "-1": #Vendor_Successfull Transactions
rec[20] = rmm[21]
if rec[21] == "-1": #Vendor_TermsAndConditions
rec[21] = rmm[12]
#rec[?] = rmm[17] #Item_EndDate
#rec[?] = rmm[?] #Item_Feedback
#rec[?] = rmm[?] #Shipping Options
#rec[?] = rmm[?] #Average Delivery Time
if rec[1] == "-1": # name_vendor
rec[1] = rmm[0]
if rec[2] == "-1": # rating_vendor
rec[2] = rmm[1]
if rec[3] == "-1": # success_vendor
rec[3] = rmm[2]
if rec[4] == "-1": # name_item
rec[4] = rmm[3]
if rec[5] == "-1": # description_item
rec[5] = rmm[4]
if rec[6] == "-1": # cve_item
rec[6] = rmm[5]
if rec[7] == "-1": # ms_item
rec[7] = rmm[6]
if rec[8] == "-1": # category_item
rec[8] = rmm[7]
if rec[9] == "-1": # views_item
rec[9] = rmm[8]
if rec[10] == "-1": # reviews_item
rec[10] = rmm[9]
if rec[11] == "-1": # rating_item
rec[11] = rmm[10]
if rec[12] == "-1": # adddate_item
rec[12] = rmm[11]
if rec[13] == "-1": # btc_item
rec[13] = rmm[12]
if rec[14] == "-1": # usd_item
rec[14] = rmm[13]
if rec[15] == "-1": # euro_item
rec[15] = rmm[14]
if rec[16] == "-1": # quantitysold_item
rec[16] = rmm[15]
if rec[17] == "-1": # quantityleft_item
rec[17] = rmm[16]
if rec[18] == "-1": # shippedfrom_item
rec[18] = rmm[17]
if rec[19] == "-1": # shippedto_item
rec[19] = rmm[18]
return rec return rec
def persist_data(row, cur):
def persist_data(url, row, cur):
marketPlace = create_marketPlace(cur, row)
marketPlace = create_marketPlace(cur, row, url)
vendor = create_vendor(cur, row)
vendor = create_vendor(cur, row, marketPlace)
create_items(cur, row, marketPlace, vendor) create_items(cur, row, marketPlace, vendor)
def new_parse(marketPlace, createLog):
def new_parse(marketPlace, url, createLog):
print("Parsing the " + marketPlace + " marketplace and conduct data classification to store the information in the database.")
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE
crawlerDate = date.today()
print("Parsing the " + marketPlace + " marketplace and conduct data classification to store the information in the database.")
# ini = time.time() # ini = time.time()
global site
#Connecting to the database
# Connecting to the database
con = connectDataBase() con = connectDataBase()
cur = con.cursor() cur = con.cursor()
#Creating the tables (The database should be created manually)
# Creating the tables (The database should be created manually)
create_database(cur, con) create_database(cur, con)
nError = 0 nError = 0
lines = [] #lines.clear()
lns = [] #lns.clear()
lines = [] # listing pages
lns = [] # description pages
detPage = {} detPage = {}
rw = []
#Creating the log file for each Market Place #Creating the log file for each Market Place
if createLog: if createLog:
if not os.path.exists("./" + marketPlace + "/Logs/" + marketPlace + "_" + str("%02d" %crawlerDate.month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + ".log"):
logFile = open("./" + marketPlace + "/Logs/" + marketPlace + "_" + str("%02d" %crawlerDate.month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + ".log", "w")
if not os.path.exists("./" + marketPlace + "/Logs/" + marketPlace + "_" + CURRENT_DATE + ".log"):
logFile = open("./" + marketPlace + "/Logs/" + marketPlace + "_" + CURRENT_DATE + ".log", "w")
else: else:
print("Files of the date " + str("%02d" %crawlerDate.month) + "/" + str("%02d" %crawlerDate.day) + "/" + str("%04d" %crawlerDate.year) +
" from the Market Place " + marketPlace + " were already read. Delete the referent information in the Data Base and also delete the log file "
"in the _Logs folder to read files from this Market Place of this date again.")
print("Files of the date " + CURRENT_DATE + " from the Market Place " + marketPlace +
" were already read. Delete the referent information in the Data Base and also delete the log file"
" in the _Logs folder to read files from this Market Place of this date again.")
raise SystemExit raise SystemExit
# Reading the Listing Html Pages # Reading the Listing Html Pages
for fileListing in glob.glob(os.path.join (os.getcwd().replace("Initialization","") + marketPlace + "\\HTML_Pages\\" + str("%02d" %crawlerDate.month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + "\\Listing" ,'*.html')):
for fileListing in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\" + CURRENT_DATE + "\\Listing", '*.html')):
lines.append(fileListing) lines.append(fileListing)
# Reading the Description Html Pages # Reading the Description Html Pages
for fileDescription in glob.glob(os.path.join (os.getcwd().replace("Initialization","") + marketPlace + "\\HTML_Pages\\" + str("%02d" %crawlerDate.month) + str("%02d" %crawlerDate.day) + str("%04d" %crawlerDate.year) + "\\Description" ,'*.html')):
for fileDescription in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\" + CURRENT_DATE + "\\Description", '*.html')):
lns.append(fileDescription) lns.append(fileDescription)
# Parsing the Description Pages and put the tag's content into a dictionary (Hash table) # Parsing the Description Pages and put the tag's content into a dictionary (Hash table)
@ -245,9 +211,7 @@ def new_parse(marketPlace, createLog):
# key = rec[23] # key = rec[23]
# key = u"Pr:" + rec[1].upper()[:list_lim1] + u" Vendor:" + rec[18].upper()[:list_lim2] # key = u"Pr:" + rec[1].upper()[:list_lim1] + u" Vendor:" + rec[18].upper()[:list_lim2]
# key = u"Pr:" + rec[1].upper()
url = ''.join(e for e in rec[23] if e.isalnum())
key = u"Url:" + url
key = u"Url:" + cleanLink(rec[20])
# if the associated description page is parsed # if the associated description page is parsed
if key in detPage: if key in detPage:
@ -260,11 +224,11 @@ def new_parse(marketPlace, createLog):
# Append to the list the classification of the product # Append to the list the classification of the product
# rec.append(str(predict(rec[1], rec[5], language='markets'))) # rec.append(str(predict(rec[1], rec[5], language='markets')))
rec.append(str(predict(rec[1], rec[5], language='sup_english')))
rec.append(str(predict(rec[4], rec[5], language='sup_english')))
# Persisting the information in the database # Persisting the information in the database
try: try:
persist_data(tuple(rec), cur)
persist_data(url, tuple(rec), cur)
con.commit() con.commit()
except: except:
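The rewritten mergePages() is now a straight positional fill: every listing field still holding the "-1" placeholder is taken from the description record, with listing index i matching description index i - 1 because the listing record carries the marketplace name in column 0. A compact sketch of the same idea, not the repo's exact 20-plus-column layout:

def merge_records(listing_rec, description_rec, placeholder="-1"):
    # listing_rec[0] is the marketplace name, so listing field i lines up with
    # description field i - 1, as in the rewritten mergePages().
    merged = list(listing_rec)
    for i in range(1, min(len(merged), len(description_rec) + 1)):
        if merged[i] == placeholder:
            merged[i] = description_rec[i - 1]
    return merged

# The description page supplies the vendor rating the listing page did not show.
print(merge_records(["ThiefWorld", "vendor1", "-1"], ["vendor1", "4.8"]))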


+ 309
- 0
MarketPlaces/ThiefWorld/crawler_selenium.py View File

@ -0,0 +1,309 @@
__author__ = 'Helium'
'''
ThiefWorld Marketplace Crawler (Selenium)
'''
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.ThiefWorld.parser import thiefworld_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML
config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1
baseURL = 'http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/'
# Opens Tor Browser, crawls the website, then parses, then closes tor
#acts like the main method for the crawler, another function at the end of this code calls this function later
def startCrawling():
opentor()
# mktName = getMKTName()
driver = getAccess()
if driver != 'down':
try:
login(driver)
crawlForum(driver)
except Exception as e:
print(driver.current_url, e)
closetor(driver)
# new_parse(mktName, False)
# Opens Tor Browser
#prompts for ENTER input to continue
def opentor():
global pid
print("Connecting Tor...")
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
pid = pro.pid
time.sleep(7.5)
input('Tor Connected. Press ENTER to continue\n')
return
# Returns the name of the website
#return: name of site in string type
def getMKTName():
name = 'ThiefWorld'
return name
# Return the base link of the website
#return: url of base site in string type
def getFixedURL():
url = 'http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/'
return url
# Closes Tor Browser
#@param: current selenium driver
def closetor(driver):
# global pid
# os.system("taskkill /pid " + str(pro.pid))
# os.system("taskkill /t /f /im tor.exe")
print('Closing Tor...')
driver.close()
time.sleep(3)
return
# Creates FireFox 'driver' and configure its 'Profile'
# to use Tor proxy and socket
def createFFDriver():
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
ff_prof.set_preference("places.history.enabled", False)
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
ff_prof.set_preference("signon.rememberSignons", False)
ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
ff_prof.set_preference("network.dns.disablePrefetch", True)
ff_prof.set_preference("network.http.sendRefererHeader", 0)
ff_prof.set_preference("permissions.default.image", 2)
ff_prof.set_preference("browser.download.folderList", 2)
ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
ff_prof.set_preference('network.proxy.type', 1)
ff_prof.set_preference("network.proxy.socks_version", 5)
ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
ff_prof.set_preference('network.proxy.socks_port', 9150)
ff_prof.set_preference('network.proxy.socks_remote_dns', True)
ff_prof.set_preference("javascript.enabled", False)
ff_prof.update_preferences()
service = Service(config.get('TOR', 'geckodriver_path'))
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
return driver
#the driver 'gets' the url, attempting to get on the site, if it can't access return 'down'
#return: return the selenium driver or string 'down'
def getAccess():
url = getFixedURL()
driver = createFFDriver()
try:
driver.get(url)
return driver
except:
driver.close()
return 'down'
# Waits for a specific element so that the whole page loads, then follows the first navigation link to reach the listings
# Unlike some of the other crawlers in this repository, no manual captcha solving is needed here
#@param: current selenium web driver
def login(driver):
# wait for page to show up (This Xpath may need to change based on different seed url)
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div/div[1]/div/div[1]/div[1]/ul")))
temp = driver.find_element(By.XPATH, '/html/body/div/header/div[2]/div/nav/div[2]/a[1]').get_attribute(
'href') # /html/body/div/div[2]/div/div[2]/div
link = urlparse.urljoin(baseURL, str(temp))
driver.get(link) # open
# wait for listing page show up (This Xpath may need to change based on different seed url)
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.ID, "side-bar")))
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(page, url):
cleanPage = cleanHTML(page)
filePath = getFullPathName(url)
os.makedirs(os.path.dirname(filePath), exist_ok=True)
open(filePath, 'wb').write(cleanPage.encode('utf-8'))
return
# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE
fileName = getNameFromURL(url)
if isDescriptionLink(url):
fullPath = r'..\ThiefWorld\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html'
else:
fullPath = r'..\ThiefWorld\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html'
return fullPath
# Creates the file name from the passed URL; falls back to a numbered name when the cleaned URL is empty
#@param: raw url as crawler crawls through every site
def getNameFromURL(url):
global counter
name = ''.join(e for e in url if e.isalnum())
if (name == ''):
name = str(counter)
counter = counter + 1
return name
# Returns the list of category URLs of interest; the crawler iterates through this list
# For ThiefWorld the product categories used here are Hacking and DDOS, Carding Manuals,
# Software, and Database (all but the first are currently commented out)
def getInterestedLinks():
links = []
# Hacking and DDOS
links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/35')
# # Carding Manuals
# links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/20')
# # Software
# links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/37')
# # Database
# links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/38')
return links
# Iterates through the links of interest; each category page is opened and crawled through
# Listing and description pages are both visited and saved here
#@param: selenium driver
def crawlForum(driver):
print("Crawling the ThiefWorld market")
linksToCrawl = getInterestedLinks()
visited = set(linksToCrawl)
initialTime = time.time()
count = 0
i = 0
while i < len(linksToCrawl):
link = linksToCrawl[i]
print('Crawling :', link)
try:
try:
driver.get(link)
except:
driver.refresh()
html = driver.page_source
savePage(html, link)
has_next_page = True
while has_next_page:
list = productPages(html)
for item in list:
itemURL = urlparse.urljoin(baseURL, str(item))
try:
driver.get(itemURL)
except:
driver.refresh()
savePage(driver.page_source, item)
driver.back()
# comment out
break
# comment out
if count == 1:
count = 0
break
try:
link = driver.find_element(by=By.XPATH, value=
'/html/body/div/div[1]/div/div/div[2]/div[3]/div/ul/li[13]/a').get_attribute('href')
if link == "":
raise NoSuchElementException
try:
driver.get(link)
except:
driver.refresh()
html = driver.page_source
savePage(html, link)
count += 1
except NoSuchElementException:
has_next_page = False
except Exception as e:
print(link, e)
i += 1
# finalTime = time.time()
# print finalTime - initialTime
input("Crawling ThiefWorld forum done sucessfully. Press ENTER to continue\n")
# Returns 'True' if the link is a description link
#@param: url of any url crawled
#return: true if is a description page, false if not
def isDescriptionLink(url):
if 'product' in url:
return True
return False
# Returns True if the link is a listingPage link
#@param: url of any url crawled
#return: true if is a Listing page, false if not
def isListingLink(url):
if 'catalog' in url:
return True
return False
# Calls the parser to extract the product links; html is the page source of a listing page
# reached from the interested-links list (see getInterestedLinks())
#@param: html source of a listing page
#return: list of description links that should be crawled through
def productPages(html):
soup = BeautifulSoup(html, "html.parser")
return thiefworld_links_parser(soup)
# Drop links that "signout"
# def isSignOut(url):
# #absURL = urlparse.urljoin(url.base_url, url.url)
# if 'signout' in url.lower() or 'logout' in url.lower():
# return True
#
# return False
def crawler():
startCrawling()
# print("Crawling and Parsing BestCardingWorld .... DONE!")

+ 1483
- 0
MarketPlaces/ThiefWorld/geckodriver.log
File diff suppressed because it is too large
View File


+ 291
- 0
MarketPlaces/ThiefWorld/parser.py View File

@ -0,0 +1,291 @@
__author__ = 'DarkWeb'
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
#parses a description page: takes the soup object of a description page and extracts the fields it needs
#the extracted values are organized into a single record that is returned
#@param: soup object of a description page
#return: 'row' tuple holding every field scraped from the description page
def darkfox_description_parser(soup):
# Fields to be parsed
name = "-1" # 0 Product_Name
describe = "-1" # 1 Product_Description
lastSeen = "-1" # 2 Product_LastViewDate
rules = "-1" # 3 NOT USED ...
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = "-1" # 5 Product_MS_Classification (Microsoft Security)
review = "-1" # 6 Product_Number_Of_Reviews
category = "-1" # 7 Product_Category
shipFrom = "-1" # 8 Product_ShippedFrom
shipTo = "-1" # 9 Product_ShippedTo
left = "-1" # 10 Product_QuantityLeft
escrow = "-1" # 11 Vendor_Warranty
terms = "-1" # 12 Vendor_TermsAndConditions
vendor = "-1" # 13 Vendor_Name
sold = "-1" # 14 Product_QuantitySold
addDate = "-1" # 15 Product_AddedDate
available = "-1" # 16 NOT USED ...
endDate = "-1" # 17 NOT USED ...
BTC = "-1" # 18 Product_BTC_SellingPrice
USD = "-1" # 19 Product_USD_SellingPrice
rating = "-1" # 20 Vendor_Rating
success = "-1" # 21 Vendor_Successful_Transactions
EURO = "-1" # 22 Product_EURO_SellingPrice
# Finding Product Name
name = soup.find('h1').text
name = name.replace('\n', ' ')
name = name.replace(",", "")
name = name.strip()
# Finding Vendor
vendor = soup.find('h3').find('a').text.strip()
# Finding Vendor Rating
rating = soup.find('span', {'class': "tag is-dark"}).text.strip()
# Finding Successful Transactions
success = soup.find('h3').text
success = success.replace("Vendor: ", "")
success = success.replace(vendor, "")
success = success.replace("(", "")
success = success.replace(")", "")
success = success.strip()
bae = soup.find('div', {'class': "box"}).find_all('ul')
# Finding Prices
USD = bae[1].find('strong').text.strip()
li = bae[2].find_all('li')
# Finding Escrow
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip()
# Finding the Product Category
category = li[1].find('span', {'class': "tag is-dark"}).text.strip()
# Finding the Product Quantity Available
left = li[3].find('span', {'class': "tag is-dark"}).text.strip()
# Finding Number Sold
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip()
li = bae[3].find_all('li')
# Finding Shipment Information (Origin)
if "Ships from:" in li[-2].text:
shipFrom = li[-2].text
shipFrom = shipFrom.replace("Ships from: ", "")
# shipFrom = shipFrom.replace(",", "")
shipFrom = shipFrom.strip()
# Finding Shipment Information (Destination)
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text
shipTo = shipTo.replace("Ships to: ", "")
shipTo = shipTo.strip()
if "certain countries" in shipTo:
countries = ""
tags = li[-1].find_all('span', {'class': "tag"})
for tag in tags:
country = tag.text.strip()
countries += country + ", "
shipTo = countries.strip(", ")
# Finding the Product description
describe = soup.find('div', {'class': "pre-line"}).text
describe = describe.replace("\n", " ")
describe = describe.strip()
'''# Finding the Number of Product Reviews
tag = soup.findAll(text=re.compile('Reviews'))
for index in tag:
reviews = index
par = reviews.find('(')
if par >=0:
reviews = reviews.replace("Reviews (","")
reviews = reviews.replace(")","")
reviews = reviews.split(",")
review = str(abs(int(reviews[0])) + abs(int(reviews[1])))
else :
review = "-1"'''
# Searching for CVE and MS categories
cve = soup.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
if cve:
CVE = " "
for idx in cve:
CVE += (idx)
CVE += " "
CVE = CVE.replace(',', ' ')
CVE = CVE.replace('\n', '')
ms = soup.findAll(text=re.compile('MS\d{2}-\d{3}'))
if ms:
MS = " "
for im in ms:
MS += (im)
MS += " "
MS = MS.replace(',', ' ')
MS = MS.replace('\n', '')
# Populating the final variable (this should be a list with all fields scraped)
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor,
sold, addDate, available, endDate, BTC, USD, rating, success, EURO)
# Sending the results
return row
#parses a listing page: takes the soup object of a listing page and extracts the fields it needs
#the values are stored in parallel lists, which are organized and returned as one record
#@param: soup object of a listing page
#return: 'row' containing the parallel lists that hold the info scraped from the listing page
def darkfox_listing_parser(soup):
# Fields to be parsed
nm = 0 # Total_Products (Should be Integer)
mktName = "DarkFox" # 0 Marketplace_Name
name = [] # 1 Product_Name
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = [] # 3 Product_MS_Classification (Microsoft Security)
category = [] # 4 Product_Category
describe = [] # 5 Product_Description
escrow = [] # 6 Vendor_Warranty
views = [] # 7 Product_Number_Of_Views
reviews = [] # 8 Product_Number_Of_Reviews
addDate = [] # 9 Product_AddDate
lastSeen = [] # 10 Product_LastViewDate
BTC = [] # 11 Product_BTC_SellingPrice
USD = [] # 12 Product_USD_SellingPrice
EURO = [] # 13 Product_EURO_SellingPrice
sold = [] # 14 Product_QuantitySold
qLeft =[] # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom
shipTo = [] # 17 Product_ShippedTo
vendor = [] # 18 Vendor
rating = [] # 19 Vendor_Rating
success = [] # 20 Vendor_Successful_Transactions
href = [] # 23 Product_Links (Urls)
listing = soup.findAll('div', {"class": "card"})
# Populating the Number of Products
nm = len(listing)
for a in listing:
bae = a.findAll('a', href=True)
# Adding the url to the list of urls
link = bae[0].get('href')
link = cleanLink(link)
href.append(link)
# Finding the Product
product = bae[1].find('p').text
product = product.replace('\n', ' ')
product = product.replace(",", "")
product = product.replace("...", "")
product = product.strip()
name.append(product)
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div')
if len(bae) >= 5:
# Finding Prices
price = bae[0].text
ud = price.replace(" USD", " ")
# u = ud.replace("$","")
u = ud.replace(",", "")
u = u.strip()
USD.append(u)
# bc = (prc[1]).strip(' BTC')
# BTC.append(bc)
# Finding the Vendor
vendor_name = bae[1].find('a').text
vendor_name = vendor_name.replace(",", "")
vendor_name = vendor_name.strip()
vendor.append(vendor_name)
# Finding the Category
cat = bae[2].find('small').text
cat = cat.replace("Category: ", "")
cat = cat.replace(",", "")
cat = cat.strip()
category.append(cat)
# Finding Number Sold and Quantity Left
num = bae[3].text
num = num.replace("Sold: ", "")
num = num.strip()
sold.append(num)
quant = bae[4].find('small').text
quant = quant.replace("In stock: ", "")
quant = quant.strip()
qLeft.append(quant)
# Finding Successful Transactions
freq = bae[1].text
freq = freq.replace(vendor_name, "")
freq = re.sub(r'Vendor Level \d+', "", freq)
freq = freq.replace("(", "")
freq = freq.replace(")", "")
freq = freq.strip()
success.append(freq)
# Searching for CVE and MS categories
cve = a.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
if not cve:
cveValue="-1"
else:
cee = " "
for idx in cve:
cee += (idx)
cee += " "
cee = cee.replace(',', ' ')
cee = cee.replace('\n', '')
cveValue=cee
CVE.append(cveValue)
ms = a.findAll(text=re.compile('MS\d{2}-\d{3}'))
if not ms:
MSValue="-1"
else:
me = " "
for im in ms:
me += (im)
me += " "
me = me.replace(',', ' ')
me = me.replace('\n', '')
MSValue=me
MS.append(MSValue)
# Populate the final variable (this should be a list with all fields scraped)
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
#called by the crawler to get description links on a listing page
#@param: beautifulsoup object that is using the correct html page (listing page)
#return: list of description links from a listing page
def thiefworld_links_parser(soup):
# Returning all links that should be visited by the Crawler
href = []
listing = soup.find('div', {"class": "row tile__list tileitems_filter pad15 tileproduct__list"}).findAll('div', {"class": "desc"})
for a in listing:
bae = a.find('div', {"class": "title"}).find('a', href=True)
link = bae['href']
href.append(link)
return href
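A usage sketch for thiefworld_links_parser(): load a saved listing page, extract the product hrefs, and join them against the onion base URL. The file name is illustrative; the import mirrors the one used by the ThiefWorld crawler:

import urllib.parse as urlparse
from bs4 import BeautifulSoup
from MarketPlaces.ThiefWorld.parser import thiefworld_links_parser

BASE_URL = 'http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/'

# Hypothetical saved listing page from HTML_Pages/<date>/Listing
with open('listing_example.html', 'r', encoding='utf-8') as f:
    soup = BeautifulSoup(f.read(), 'html.parser')

for href in thiefworld_links_parser(soup):
    print(urlparse.urljoin(BASE_URL, href))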

+ 25
- 30
MarketPlaces/Tor2door/crawler_selenium.py View File

@ -15,41 +15,42 @@ from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image from PIL import Image
import urllib.parse as urlparse import urllib.parse as urlparse
import os, time
from datetime import date
import os, re, time
import subprocess import subprocess
import configparser
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Tor2door.parser import tor2door_links_parser from MarketPlaces.Tor2door.parser import tor2door_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML from MarketPlaces.Utilities.utilities import cleanHTML
config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1 counter = 1
baseURL = 'http://http://yzrrne3pveltulbavydr2kiashvlnysdwclwmklo6cyjuqpxi7ku4xqd.onion'
baseURL = 'http://yzrrne3pveltulbavydr2kiashvlnysdwclwmklo6cyjuqpxi7ku4xqd.onion'
# Opens Tor Browser, crawls the website # Opens Tor Browser, crawls the website
def startCrawling(): def startCrawling():
opentor()
# marketName = getMarketName()
driver = getAccess()
if driver != 'down':
try:
login(driver)
crawlForum(driver)
except Exception as e:
print(driver.current_url, e)
closetor(driver)
# new_parse(marketName, False)
# opentor()
marketName = getMarketName()
# driver = getAccess()
#
# if driver != 'down':
# try:
# login(driver)
# crawlForum(driver)
# except Exception as e:
# print(driver.current_url, e)
# closetor(driver)
#
new_parse(marketName, baseURL, False)
# Opens Tor Browser # Opens Tor Browser
def opentor(): def opentor():
global pid global pid
print("Connecting Tor...") print("Connecting Tor...")
path = open('../../path.txt').readline().strip()
pro = subprocess.Popen(path)
pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
pid = pro.pid pid = pro.pid
time.sleep(7.5) time.sleep(7.5)
input('Tor Connected. Press ENTER to continue\n') input('Tor Connected. Press ENTER to continue\n')
@ -130,12 +131,9 @@ def closetor(driver):
# Creates FireFox 'driver' and configure its 'Profile' # Creates FireFox 'driver' and configure its 'Profile'
# to use Tor proxy and socket # to use Tor proxy and socket
def createFFDriver(): def createFFDriver():
file = open('../../path.txt', 'r')
lines = file.readlines()
ff_binary = FirefoxBinary(lines[0].strip())
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
ff_prof = FirefoxProfile(lines[1].strip())
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
ff_prof.set_preference("places.history.enabled", False) ff_prof.set_preference("places.history.enabled", False)
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
@ -157,7 +155,7 @@ def createFFDriver():
ff_prof.set_preference("javascript.enabled", False) ff_prof.set_preference("javascript.enabled", False)
ff_prof.update_preferences() ff_prof.update_preferences()
service = Service(executable_path=lines[2].strip())
service = Service(config.get('TOR', 'geckodriver_path'))
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
@ -186,15 +184,12 @@ def savePage(page, url):
# Gets the full path of the page to be saved along with its appropriate file name # Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url): def getFullPathName(url):
from MarketPlaces.Initialization.markets_mining import CURRENT_DATE
fileName = getNameFromURL(url) fileName = getNameFromURL(url)
if isDescriptionLink(url): if isDescriptionLink(url):
fullPath = r'..\Tor2door\HTML_Pages\\' + str(
"%02d" % date.today().month) + str("%02d" % date.today().day) + str(
"%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html'
fullPath = r'..\Tor2door\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html'
else: else:
fullPath = r'..\Tor2door\HTML_Pages\\' + str(
"%02d" % date.today().month) + str("%02d" % date.today().day) + str(
"%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html'
fullPath = r'..\Tor2door\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html'
return fullPath return fullPath
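Both crawlers now build the save path from the shared CURRENT_DATE constant. A portable sketch of getFullPathName(); the raw-string concatenation in the repo is Windows-specific, so os.path.join is used here purely for illustration, and the counter fallback of getNameFromURL() is simplified to a fixed name:

import os

def full_path_name(market, url, current_date, is_description):
    # File name is the URL stripped to alphanumerics, as in getNameFromURL()
    # (the repo falls back to a global counter when nothing is left).
    file_name = ''.join(ch for ch in url if ch.isalnum()) or 'page1'
    sub = 'Description' if is_description else 'Listing'
    return os.path.join('..', market, 'HTML_Pages', current_date, sub, file_name + '.html')

print(full_path_name('Tor2door', 'http://example.onion/product/42', '06202023', True))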


+ 44
- 55
MarketPlaces/Tor2door/parser.py View File

@ -12,29 +12,25 @@ def tor2door_description_parser(soup):
# Fields to be parsed # Fields to be parsed
name = "-1" # 0 Product_Name y
describe = "-1" # 1 Product_Description y
lastSeen = "-1" # 2 Product_LastViewDate
rules = "-1" # 3 NOT USED ...
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = "-1" # 5 Product_MS_Classification (Microsoft Security)
review = "-1" # 6 Product_Number_Of_Reviews
vendor = "-1" # 0 *Vendor_Name
success = "-1" # 1 Vendor_Successful_Transactions
rating_vendor = "-1" # 2 Vendor_Rating
name = "-1" # 3 *Product_Name
describe = "-1" # 4 Product_Description
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = "-1" # 6 Product_MS_Classification (Microsoft Security)
category = "-1" # 7 Product_Category category = "-1" # 7 Product_Category
shipFrom = "-1" # 8 Product_ShippedFrom
shipTo = "-1" # 9 Product_ShippedTo
left = "-1" # 10 Product_QuantityLeft y
escrow = "-1" # 11 Vendor_Warranty y
terms = "-1" # 12 Vendor_TermsAndConditions
vendor = "-1" # 13 Vendor_Name y
sold = "-1" # 14 Product_QuantitySold y
addDate = "-1" # 15 Product_AddedDate
available = "-1" # 16 NOT USED ...
endDate = "-1" # 17 NOT USED ...
BTC = "-1" # 18 Product_BTC_SellingPrice y
USD = "-1" # 19 Product_USD_SellingPrice y
rating = "-1" # 20 Vendor_Rating
success = "-1" # 21 Vendor_Successful_Transactions
EURO = "-1" # 22 Product_EURO_SellingPrice
views = "-1" # 8 Product_Number_Of_Views
reviews = "-1" # 9 Product_Number_Of_Reviews
rating_item = "-1" # 10 Product_Rating
addDate = "-1" # 11 Product_AddedDate
BTC = "-1" # 12 Product_BTC_SellingPrice
USD = "-1" # 13 Product_USD_SellingPrice
EURO = "-1" # 14 Product_EURO_SellingPrice
sold = "-1" # 15 Product_QuantitySold
left = "-1" # 16 Product_QuantityLeft
shipFrom = "-1" # 17 Product_ShippedFrom
shipTo = "-1" # 18 Product_ShippedTo
bae = soup.find('div', {'class': "col-9"}) bae = soup.find('div', {'class': "col-9"})
@ -57,11 +53,6 @@ def tor2door_description_parser(soup):
# half_star = bae[2].find('i', {'class': "fas fa-star-half-alt"}) # half_star = bae[2].find('i', {'class': "fas fa-star-half-alt"})
# rating = len(full_stars) + (0.5 if half_star is not None else 0) # rating = len(full_stars) + (0.5 if half_star is not None else 0)
# Finding Warranty
escrow = mb[2].text
escrow = escrow.replace("Payment:", "")
escrow = escrow.strip()
# Finding Quantity Sold and Left # Finding Quantity Sold and Left
temp = mb[4].text.split(',') temp = mb[4].text.split(',')
@ -116,8 +107,8 @@ def tor2door_description_parser(soup):
MS = MS.replace('\n', '') MS = MS.replace('\n', '')
# Populating the final variable (this should be a list with all fields scraped) # Populating the final variable (this should be a list with all fields scraped)
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor,
sold, addDate, available, endDate, BTC, USD, rating, success, EURO)
row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
BTC, USD, EURO, sold, left, shipFrom, shipTo)
# Sending the results # Sending the results
return row return row
@ -127,29 +118,28 @@ def tor2door_description_parser(soup):
def tor2door_listing_parser(soup): def tor2door_listing_parser(soup):
# Fields to be parsed # Fields to be parsed
nm = 0 # Total_Products (Should be Integer)
mktName = "Tor2door" # 0 Marketplace_Name
name = [] # 1 Product_Name y
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = [] # 3 Product_MS_Classification (Microsoft Security)
category = [] # 4 Product_Category y
describe = [] # 5 Product_Description
escrow = [] # 6 Vendor_Warranty
views = [] # 7 Product_Number_Of_Views
reviews = [] # 8 Product_Number_Of_Reviews y
addDate = [] # 9 Product_AddDate
lastSeen = [] # 10 Product_LastViewDate
BTC = [] # 11 Product_BTC_SellingPrice
USD = [] # 12 Product_USD_SellingPrice y
EURO = [] # 13 Product_EURO_SellingPrice
sold = [] # 14 Product_QuantitySold
qLeft =[] # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom
shipTo = [] # 17 Product_ShippedTo
vendor = [] # 18 Vendor y
rating = [] # 19 Vendor_Rating
success = [] # 20 Vendor_Successful_Transactions
href = [] # 24 Product_Links (Urls)
nm = 0 # *Total_Products (Should be Integer)
mktName = "Tor2door" # 0 *Marketplace_Name
vendor = [] # 1 *Vendor y
rating_vendor = [] # 2 Vendor_Rating
success = [] # 3 Vendor_Successful_Transactions
name = [] # 4 *Product_Name y
CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = [] # 6 Product_MS_Classification (Microsoft Security)
category = [] # 7 Product_Category y
describe = [] # 8 Product_Description
views = [] # 9 Product_Number_Of_Views
reviews = [] # 10 Product_Number_Of_Reviews
rating_item = [] # 11 Product_Rating
addDate = [] # 12 Product_AddDate
BTC = [] # 13 Product_BTC_SellingPrice
USD = [] # 14 Product_USD_SellingPrice y
EURO = [] # 15 Product_EURO_SellingPrice
sold = [] # 16 Product_QuantitySold
qLeft =[] # 17 Product_QuantityLeft
shipFrom = [] # 18 Product_ShippedFrom
shipTo = [] # 19 Product_ShippedTo
href = [] # 20 Product_Links
listing = soup.findAll('div', {"class": "card product-card mb-3"}) listing = soup.findAll('div', {"class": "card product-card mb-3"})
@ -170,7 +160,6 @@ def tor2door_listing_parser(soup):
# Adding the url to the list of urls # Adding the url to the list of urls
link = bae[0].get('href') link = bae[0].get('href')
link = cleanLink(link)
href.append(link) href.append(link)
# Finding Product Name # Finding Product Name
@ -228,8 +217,8 @@ def tor2door_listing_parser(soup):
MS.append(MSValue) MS.append(MSValue)
# Populate the final variable (this should be a list with all fields scraped) # Populate the final variable (this should be a list with all fields scraped)
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
def tor2door_links_parser(soup): def tor2door_links_parser(soup):
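The parser and utilities.py now agree on the new column order, but the return above still passes everything positionally. A hedged sketch of the same call with keyword arguments (illustrative only; the parameter names are taken from the updated organizeProducts signature in MarketPlaces/Utilities/utilities.py below):

return organizeProducts(
    marketplace=mktName, nm=nm, vendor=vendor, rating_vendor=rating_vendor,
    success_vendor=success, nombre=name, CVE=CVE, MS=MS, category=category,
    describe=describe, views=views, reviews=reviews, rating_item=rating_item,
    addDate=addDate, BTC=BTC, USD=USD, EURO=EURO, sold=sold, qLeft=qLeft,
    shipFrom=shipFrom, shipTo=shipTo, href=href,
)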


+ 28
- 26
MarketPlaces/Utilities/utilities.py View File

@ -207,57 +207,59 @@ def cleanLink(originalLink):
return originalLink return originalLink
def organizeProducts(marketplace, nm, nombre, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
BTC, USD, EURO, qLeft, shipFrom, shipTo, user, rating, success, sold, href):
def organizeProducts(marketplace, nm, vendor, rating_vendor, success_vendor, nombre, CVE, MS, category, describe,
views, reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href):
rw = [] rw = []
day = time.strftime("%m/%d/%Y") day = time.strftime("%m/%d/%Y")
#day = time.strftime("%d/%m/%Y")
ahora = time.strftime("%I:%M:%S") ahora = time.strftime("%I:%M:%S")
for n in range(nm): for n in range(nm):
lne = marketplace + "," #0
lne += "-1" if len(nombre) == 0 else nombre[n] #1
lne = marketplace # 0
lne += ","
lne += vendor[n] # 1
lne += ","
lne += "-1" if len(rating_vendor) == 0 else rating_vendor[n] # 2
lne += ","
lne += "-1" if len(success_vendor) == 0 else success_vendor[n] # 3
lne += ","
lne += nombre[n] # 4
lne += ',' lne += ','
lne += "-1" if len(CVE) == 0 else CVE[n] #2
lne += "-1" if len(describe) == 0 else describe[n] # 5
lne += "," lne += ","
lne += "-1" if len(MS) == 0 else MS[n] #3
lne += "-1" if len(CVE) == 0 else CVE[n] # 6
lne += "," lne += ","
lne += "-1" if len(category) == 0 else category[n] #4
lne += "-1" if len(MS) == 0 else MS[n] # 7
lne += "," lne += ","
lne += "-1" if len(describe) == 0 else describe[n] #5
lne += "-1" if len(category) == 0 else category[n] # 8
lne += "," lne += ","
lne += "-1" if len(escrow) == 0 else escrow[n] #6
lne += "-1" if len(views) == 0 else views[n] # 9
lne += "," lne += ","
lne += "-1" if len(views) == 0 else views[n] #7
lne += "-1" if len(reviews) == 0 else reviews[n] # 10
lne += "," lne += ","
lne += "-1" if len(reviews) == 0 else reviews[n] #8
lne += "-1" if len(rating_item) == 0 else rating_item[n] # 11
lne += "," lne += ","
lne += "-1" if len(addDate) == 0 else addDate[n] #9
lne += "-1" if len(addDate) == 0 else addDate[n] # 12
lne += "," lne += ","
lne += "-1" if len(lastSeen) == 0 else lastSeen[n] #10
lne += "-1" if len(BTC) == 0 else BTC[n] # 13
lne += "," lne += ","
lne += "-1" if len(BTC) == 0 else BTC[n] #11
lne += "-1" if len(USD) == 0 else USD[n] # 14
lne += "," lne += ","
lne += "-1" if len(USD) == 0 else USD[n] #12
lne += "-1" if len(EURO) == 0 else EURO[n] # 15
lne += "," lne += ","
lne += "-1" if len(EURO) == 0 else EURO[n] #13
lne += "-1" if len(sold) == 0 else sold[n] # 16
lne += "," lne += ","
lne += "-1" if len(sold) == 0 else sold[n] #14
lne += "-1" if len(qLeft) == 0 else qLeft[n] # 17
lne += "," lne += ","
lne += "-1" if len(qLeft) == 0 else qLeft[n] #15
lne += "-1" if len(shipFrom) == 0 else shipFrom[n] # 18
lne += "," lne += ","
lne += "-1" if len(shipFrom) == 0 else shipFrom[n] #16
lne += "-1" if len(shipTo) == 0 else shipTo[n] # 19
lne += "," lne += ","
lne += "-1" if len(shipTo) == 0 else shipTo[n] #17
lne += "," + user[n] + "," #18
lne += "-1" if len(rating) == 0 else rating[n] #19
lne += "-1" if len(href) == 0 else href[n] # 20
lne += "," lne += ","
lne += "-1" if len(success) == 0 else success[n] #20
lne += "," + "-1" + "," + day + " " + ahora + "," #21, 22
lne += "-1" if len(href) == 0 else href[n] #23
lne += day + " " + ahora # 21
rw.append(lne) rw.append(lne)
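Each row above is assembled by repeated string concatenation with a "-1" fallback per column. An equivalent, hedged sketch using a single join (illustrative only, not the committed code; it applies the same fallback uniformly, including to vendor and nombre, which the code above indexes directly):

def build_row(n, marketplace, day, ahora, **cols):
    # Returns column value n, or "-1" when that column was never populated.
    def col(name):
        values = cols.get(name) or []
        return values[n] if len(values) > 0 else "-1"

    # Same layout as above: marketplace first, then columns 1-20, then the scrape timestamp.
    ordered = ["vendor", "rating_vendor", "success_vendor", "nombre", "describe",
               "CVE", "MS", "category", "views", "reviews", "rating_item",
               "addDate", "BTC", "USD", "EURO", "sold", "qLeft", "shipFrom",
               "shipTo", "href"]
    return ",".join([marketplace] + [col(name) for name in ordered] + [day + " " + ahora])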


+ 14
- 0
setup.ini View File

@ -0,0 +1,14 @@
[TOR]
firefox_binary_path = C:\Users\calsyslab\Desktop\Tor Browser\Browser\firefox.exe
firefox_profile_path = C:\Users\calsyslab\Desktop\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default
geckodriver_path = C:\Users\calsyslab\Projects\dw_pipeline_test\selenium\geckodriver.exe
[Project]
project_directory = C:\Users\calsyslab\Projects\dw_pipeline_test
shared_folder = \\VBoxSvr\VM_Files_(shared)
[PostgreSQL]
ip = localhost
username = postgres
password = password
database = darkweb_markets_forums
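The new setup.ini centralizes the Tor Browser paths, project directories, and PostgreSQL credentials. A minimal sketch of loading it with Python's standard configparser (illustrative; the actual loading code is not part of this diff):

from configparser import ConfigParser

config = ConfigParser()
config.read("setup.ini")

firefox_binary = config["TOR"]["firefox_binary_path"]
project_dir = config["Project"]["project_directory"]
db_password = config["PostgreSQL"]["password"]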
