Merge branch 'main' into gofish_dev

# Conflicts:
#	MarketPlaces/Initialization/marketsList.txt
#	MarketPlaces/Initialization/markets_mining.py
#	MarketPlaces/Initialization/prepare_parser.py
main
Joshua 1 year ago
commit bff8156764
21 changed files with 1554 additions and 377 deletions
  1. Forums/HiddenAnswers/parser.py  +0 -0
  2. Forums/Incogsnoo/crawler_selenium.py  +289 -0
  3. Forums/Incogsnoo/parser.py  +271 -0
  4. Forums/Initialization/prepare_parser.py  +82 -0
  5. MarketPlaces/BlackPyramid/crawler_selenium.py  +16 -15
  6. MarketPlaces/BlackPyramid/parser.py  +21 -15
  7. MarketPlaces/Bohemia/crawler_selenium.py  +1 -1
  8. MarketPlaces/CityMarket/crawler_selenium.py  +20 -14
  9. MarketPlaces/CityMarket/parser.py  +13 -23
  10. MarketPlaces/CypherMarketplace/crawler_selenium.py  +7 -6
  11. MarketPlaces/CypherMarketplace/parser.py  +2 -2
  12. MarketPlaces/DB_Connection/db_connection.py  +1 -1
  13. MarketPlaces/Initialization/markets_mining.py  +19 -0
  14. MarketPlaces/MikesGrandStore/crawler_selenium.py  +53 -65
  15. MarketPlaces/MikesGrandStore/parser.py  +194 -199
  16. MarketPlaces/TheDarkMarket/crawler_selenium.py  +352 -0
  17. MarketPlaces/TheDarkMarket/parser.py  +182 -0
  18. MarketPlaces/Utilities/utilities.py  +1 -2
  19. MarketPlaces/WeTheNorth/crawler_selenium.py  +20 -25
  20. MarketPlaces/WeTheNorth/parser.py  +6 -5
  21. setup.ini  +4 -4

+0 -0  Forums/HiddenAnswers/parser.py


+289 -0  Forums/Incogsnoo/crawler_selenium.py

@ -0,0 +1,289 @@
__author__ = 'DarkWeb'

'''
Incogsnoo Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Incogsnoo.parser import incogsnoo_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/'


# Opens Tor Browser, crawls the website, then parses, then closes Tor.
# Acts like the main method for the crawler; another function at the end of this file calls it.
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(forumName, baseURL, True)


# Returns the name of the website
# return: name of site in string type
def getForumName():
    name = 'Incogsnoo'
    return name


# Returns the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)  # might need to turn off
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver


# The driver 'gets' the url, attempting to access the site; if it can't, returns 'down'
# return: the selenium driver or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath


# Creates the file name from the passed URL, gives a distinct name if it can't be made unique after cleaning
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if (name == ''):
        name = str(counter)
        counter = counter + 1
    return name


# Returns the list of urls of interest; the crawler runs through this list.
# In this example, there are a couple of categories some threads fall under, such as
# exploits, malware, and hacking tutorials
def getInterestedLinks():
    links = []

    # Malware
    links.append('http://tedditfyn6idalzso5wam5qd3kdtxoljjhbrbbx34q2xkcisvshuytad.onion/r/Malware')
    #

    return links


# Gets links of interest to crawl through; iterates through the list, where each link is clicked and crawled.
# Topic and description pages are crawled through here, and both types of pages are saved.
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the Incogsnoo forum")

    # edge cases:
    # 1. if a comment thread goes too deep, we need to click "continue this thread" to show more replies
    # 2. the site will sometimes rate limit you and not show the contents;
    #    right now, there is no detection mechanism and it won't throw any errors

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()

                        if isListingLink(driver.current_url):
                            break

                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # comment out
                        if counter == 2:
                            break

                        try:
                            # incogsnoo doesn't have a next button to load more pages of the description
                            link_tag = driver.find_element(by=By.XPATH, value="/html/body/div[2]/div[last()]/a[contains(text(),'next')]")
                            link = link_tag.get_attribute("href")
                            if link == "":
                                raise NoSuchElementException
                            counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # making sure we go back to the listing page (browser back button simulation)
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()

                # comment out
                # break

                # comment out
                if count == 1:
                    break

                try:
                    link_tag = driver.find_element(by=By.XPATH, value="/html/body/div[2]/div[last()]/a[contains(text(),'next')]")
                    link = link_tag.get_attribute("href")
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Incogsnoo forum done.")


# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if it is a description page, false if not
def isDescriptionLink(url):
    if 'comments' in url:
        return True
    return False


# Returns True if the link is a listing page link
# @param: url of any url crawled
# return: true if it is a listing page, false if not
def isListingLink(url):
    if isDescriptionLink(url):
        return False
    return True


# Calls the parser to find the description links; the html is the page source of a link from the interested-links list
# @param: link from interested link list
# return: list of description links that should be crawled through
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', {"class": "forumbg"}).find('ul', {"class": "topiclist topics"}).find('li', {"class": "row bg1"}).find('a', {"class": "topictitle"}, href=True))
    return incogsnoo_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing BestCardingWorld .... DONE!")
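The edge-case note in crawlForum points out that rate-limited responses are currently undetected. A minimal sketch of what such a guard could look like, assuming the rate-limit page comes back with a short body or mentions the limit in its text (both assumptions about the site, not verified):

def looksRateLimited(html):
    # assumption: teddit's rate-limit page has little content and/or names the limit
    return len(html) < 1024 or "rate limit" in html.lower()

# possible use inside crawlForum, right after html = driver.page_source:
#     if looksRateLimited(html):
#         time.sleep(60)  # back off before retrying
#         driver.refresh()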

+271 -0  Forums/Incogsnoo/parser.py

@ -0,0 +1,271 @@
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from Forums.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page for each topic in the Listing Pages).
# It takes the html of a description page as a soup object and parses it for the info it needs,
# storing that info in different lists; these lists are returned after being organized.
# @param: soup object looking at the html page of a description page
# return: 'row' that contains a variety of lists that each hold info on the description page
def incogsnoo_description_parser(soup):
    # Fields to be parsed

    topic = "-1"     # 0 topic name ***$
    user = []        # 1 all users of each post ***$ author
    status = []      # 2 all users' authority in each post such as (adm, member, dangerous)
    reputation = []  # 3 all users' karma in each post (usually found as a number) ??? ups
    interest = []    # 4 all users' interest in each post
    sign = []        # 5 all users' signatures in each post (usually a standard message after the content of the post)
    post = []        # 6 all messages of each post
    feedback = []    # 7 all feedback of each user (this was found in just one forum and with a number format)
    addDate = []     # 8 all dates of each post ***$ created
    image_user = []  # 9 all user avatars of each post
    image_post = []  # 10 all first images of each post

    # Finding the topic (should be just one coming from the Listing Page)
    topic = soup.find("div", {"class": "title"}).find("h2").text
    topic = topic.replace('"', '')
    topic = cleanString(topic.strip())

    # the first post's html is separated from all subsequent comments/replies/posts to the first post,
    # so parse the first post by itself first

    # Finding the body of the first post
    post_text = soup.find("div", {"class": "md"})
    if post_text:
        post_text = post_text.text.strip()
        post.append(cleanString(post_text))
    else:  # some posts are just links to other sites/articles/videos and have no text by themselves
        post_link = soup.find("div", {"class": "title"}).find("a").get("href")
        post_link = cleanLink(post_link)
        post.append(post_link)

    # User
    p_tag = soup.find("p", {"class": "submitted"})
    author = p_tag.find("a")
    if author:
        author = author.text.strip()
    elif "[deleted]" in p_tag.text:
        author = "deleted"
    else:
        author = "-1"
    user.append(cleanString(author))

    # Finding the status of the author
    status.append("-1")

    # Finding the reputation of the user
    reputation.append("-1")

    # Finding the interest of the author
    interest.append("-1")

    # Finding signature
    sign.append("-1")

    # Finding feedback
    upvote = soup.find("div", {"class": "score"}).find("span")
    if upvote:
        upvote = upvote.text.strip()
    else:
        upvote = "-1"
    feedback.append(cleanString(upvote))

    # Finding the date of the post - e.g. "Fri, 18 Dec 2023 05:49:20 GMT"
    dt = soup.find("p", {"class": "submitted"}).find("span")["title"]
    # Convert to a datetime object - e.g. 2023-12-18 05:49:20
    date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')

    sdate = date_time_obj.strftime('%m %d %Y')
    stime = date_time_obj.strftime('%I:%M %p')

    date = convertDate(sdate, "english", datetime.now()) + " " + stime
    # e.g. "12/18/2023 05:49 AM"
    addDate.append(date)

    image_user.append("-1")
    image_post.append("-1")

    posts = soup.find("div", {"class": "comments"}).findAll("details")

    # For each message (post), get all the fields we are interested in:
    for ipost in posts:
        # Finding user
        p_tag = ipost.find("p", {"class": "author"})
        author = p_tag.find("a")
        if author:
            author = author.text.strip()
        elif "[deleted]" in p_tag.text:
            author = "deleted"
        else:
            author = "-1"
        user.append(cleanString(author))

        # Finding the status of the author
        status.append("-1")

        # Finding the reputation of the user
        reputation.append("-1")

        # Finding the interest of the author
        interest.append("-1")

        # Finding signature
        sign.append("-1")

        # Finding the post
        comment = ipost.find("div", {"class": "md"})
        if comment:
            comment = comment.text.strip()
        else:
            comment = "-1"
        post.append(cleanString(comment))

        # Finding feedback
        upvote = ipost.find("p", {"class": "ups"})
        if upvote:
            upvote = upvote.text.strip().split()[0]
        else:
            upvote = "-1"
        feedback.append(cleanString(upvote))

        # Finding the date of the post - e.g. "Fri, 18 Dec 2023 05:49:20 GMT"
        dt = ipost.find("p", {"class": "created"})["title"]
        # Convert to a datetime object - e.g. 2023-12-18 05:49:20
        date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')

        sdate = date_time_obj.strftime('%m %d %Y')
        stime = date_time_obj.strftime('%I:%M %p')

        date = convertDate(sdate, "english", datetime.now()) + " " + stime
        # e.g. "12/18/2023 05:49 AM"
        addDate.append(date)

        image_user.append("-1")
        image_post.append("-1")

    # Populate the final variable (this should be a list with all fields scraped)
    row = (topic, user, status, reputation, interest, sign, post, feedback, addDate, image_user, image_post)

    # Sending the results
    return row


# This is the method to parse the Listing Pages (one page with many posts).
# It takes the html of a listing page as a soup object and parses it for the info it needs,
# storing that info in different lists; these lists are returned after being organized.
# @param: soup object looking at the html page of a listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def incogsnoo_listing_parser(soup):
    nm = 0                # *this variable should receive the number of topics
    forum = "Incogsnoo"   # 0 *forum name
    board = "-1"          # 1 *board name (the previous level of the topic in the forum categorization tree.
                          # For instance: Security/Malware/Tools to hack Facebook. The board here should be Malware)
    author = []           # 2 *all authors of each topic
    topic = []            # 3 *all topics
    views = []            # 4 number of views of each topic
    posts = []            # 5 number of posts of each topic
    href = []             # 6 this variable should receive all cleaned urls (we will use this to do the merge between
                          # Listing and Description pages)
    addDate = []          # 7 when the topic was created (difficult to find)
    image_author = []     # 8 all author avatars used in each topic

    # Finding the board (should be just one)
    board = soup.find("a", {"class": "subreddit"}).find("h2")
    board = cleanString(board.text.strip())

    # Finding the repeated tag that corresponds to the listing of topics
    itopics = soup.find("div", {"id": "links", "class": "sr"}).findAll("div", {"class": "link"})
    itopics.pop()

    # Counting how many topics we have found so far
    nm = len(itopics)

    index = 0
    for itopic in itopics:
        # Finding the author of the topic
        p_tag = itopic.find("p", {"class": "submitted"})
        user = p_tag.find("a")
        if user:
            user = user.text.strip()
        elif "[deleted]" in p_tag.text:
            user = "deleted"
        else:
            user = "-1"
        author.append(cleanString(user))

        # Adding the topic to the topic list
        topic_title = itopic.find("div", {"class": "title"}).find("h2").text
        topic.append(cleanString(topic_title))

        # Finding the number of Views
        views.append("-1")

        # Finding the number of posts
        comments = itopic.find("a", {"class": "comments"}).text
        number_comments = comments.split()[0]
        posts.append(cleanString(number_comments))

        # Adding the url to the list of urls
        link = itopic.find("a", {"class": "comments"}).get("href")
        link = cleanLink(link)
        href.append(link)

        # Finding dates
        p_tag = itopic.find("p", {"class": "submitted"})
        dt = p_tag.find("span")["title"]
        date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')
        sdate = date_time_obj.strftime('%m %d %Y')
        stime = date_time_obj.strftime('%I:%M %p')
        date = convertDate(sdate, "english", datetime.now()) + " " + stime
        # e.g. "12/18/2023 05:49 AM"
        addDate.append(date)

        image_author.append("-1")

        index += 1

    return organizeTopics(forum, nm, board, author, topic, views, posts, href, addDate, image_author)


# Called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def incogsnoo_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing_parent = soup.find("div", {"id": "links", "class": "sr"})
    listing = listing_parent.findAll("div", {"class": "entry"})

    count = 0
    for entry in listing:
        parent_div = entry.find("div", {"class": "meta"}).find("div", {"class", "links"})
        a_tag = parent_div.find("a", {"class", "comments"})
        if a_tag:
            href.append(a_tag.get("href"))

        # if count == 10:
        #     break

        count += 1

    return href
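Both parsers convert teddit's timestamps with the same strptime pattern; note that %b expects the abbreviated month name ("Dec", not "December"). A standalone round-trip of that conversion:

from datetime import datetime

dt = "Fri, 18 Dec 2023 05:49:20 GMT"
date_time_obj = datetime.strptime(dt, '%a, %d %b %Y %H:%M:%S %Z')
print(date_time_obj.strftime('%m %d %Y'))  # 12 18 2023
print(date_time_obj.strftime('%I:%M %p'))  # 05:49 AM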

+82 -0  Forums/Initialization/prepare_parser.py

@ -224,6 +224,50 @@ def new_parse(forum, url, createLog):
try:
logFile = open(mainDir + f"/{CURRENT_DATE}/" + forum + "_" + CURRENT_DATE + ".log", "w")
except:
try:
html = open(line2.strip('\n'))
soup = BeautifulSoup(html, "html.parser")
html.close()
except:
nError += 1
print("There was a problem to read the file " + line2 + " in the Description section!")
if createLog:
logFile.write(str(nError) + ". There was a problem to read the file " + line2 + " in the Description section!\n")
continue
try:
if forum == "BestCardingWorld":
rmm = bestcardingworld_description_parser(soup)
elif forum == "Cardingleaks":
rmm = cardingleaks_description_parser(soup)
elif forum == "CryptBB":
rmm = cryptBB_description_parser(soup)
elif forum == "OnniForums":
rmm = onniForums_description_parser(soup)
elif forum == "Altenens":
rmm = altenens_description_parser(soup)
elif forum == "Procrax":
rmm = procrax_description_parser(soup)
elif forum == "Libre":
rmm = libre_description_parser(soup)
elif forum == "HiddenAnswers":
rmm = HiddenAnswers_description_parser(soup)
# key = u"Top:" + rmm[0].upper().strip() + u" User:" + rmm[2][0].upper().strip()
key = u"Url:" + os.path.basename(line2).replace(".html", "")
# check if "page1" exists at the end of a string
# if yes add to first page directory if no add to other
check = re.compile(r'page1$')
if check.search(key):
# print(key, 'is a first page\n')
detPage[key] = {'rmm': rmm, 'files': [os.path.basename(line2)]}
else:
# print(key, 'is an other page\n')
other[key] = {'rmm': rmm, 'filename': os.path.basename(line2)}
print("Could not open log file!")
createLog = False
logFile = None
@ -260,6 +304,44 @@ def new_parse(forum, url, createLog):
if doDescription:
if not readError:
parseError = False
try:
if forum == "BestCardingWorld":
rw = bestcardingworld_listing_parser(soup)
elif forum == "Cardingleaks":
rw = cardingleaks_listing_parser(soup)
elif forum == "CryptBB":
rw = cryptBB_listing_parser(soup)
elif forum == "OnniForums":
rw = onniForums_listing_parser(soup)
elif forum == "Altenens":
rw = altenens_listing_parser(soup)
elif forum == "Procrax":
rw = procrax_listing_parser(soup)
elif forum == "Libre":
rw = libre_listing_parser(soup)
elif forum == "HiddenAnswers":
rw = HiddenAnswers_listing_parser(soup)
except:
nError += 1
print("There was a problem to read the file " + line1 + " in the listing section!")
traceback.print_exc()
if createLog:
logFile.write(
str(nError) + ". There was a problem to read the file " + line1 + " in the Listing section.\n")
parseError = True
if not parseError:
persistError = False
moveError = False
num_in_db = 0
num_persisted_moved = 0
nFound = 0
for rec in rw:
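The elif chains above are how new_parse dispatches to the per-forum parsers. As a sketch only (not what the repo does), the same dispatch can be table-driven, which keeps each new forum to a one-line registration:

# hypothetical table-driven alternative; parser names taken from the chain above
LISTING_PARSERS = {
    "BestCardingWorld": bestcardingworld_listing_parser,
    "Cardingleaks": cardingleaks_listing_parser,
    "CryptBB": cryptBB_listing_parser,
    "OnniForums": onniForums_listing_parser,
    "Altenens": altenens_listing_parser,
    "Procrax": procrax_listing_parser,
    "Libre": libre_listing_parser,
    "HiddenAnswers": HiddenAnswers_listing_parser,
}

def dispatch_listing(forum, soup):
    parser = LISTING_PARSERS.get(forum)
    if parser is None:
        raise ValueError("no listing parser registered for " + forum)
    return parser(soup)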


+16 -15  MarketPlaces/BlackPyramid/crawler_selenium.py

@ -45,7 +45,7 @@ def startCrawling():
print(driver.current_url, e)
closetor(driver)
new_parse(marketName, baseURL, False)
new_parse(marketName, baseURL, True)
# Login
@ -207,11 +207,16 @@ def goToPage(driver, page):
def getInterestedLinks():
links = []
# h11 -> Hacking Tools
links.append('h11')
# g3 -> Guides, Hacking
# se3 -> Services, Hacking
# f6 -> Fraud software
links = ['h11','g3','se3','f6']
links.append('g3')
# se3 -> Services
links.append('se11')
# f6 -> Fraud
links.append('f11')
return links
@ -226,6 +231,7 @@ def crawlForum(driver):
for listing in pages:
print('Crawling :', listing)
try:
driver.get(baseURL)
goToPage(driver, listing)
has_next_page = True
@ -251,12 +257,12 @@ def crawlForum(driver):
# can't use the back button in dark pyramid
# driver.back()
# comment out
break
# comment out
if count == 1:
break
# # comment out
# break
#
# # comment out
# if count == 1:
# break
# go to next page of market
try:
@ -322,8 +328,3 @@ def productPages(html):
def crawler():
startCrawling()
# print("Crawling and Parsing BestCardingWorld .... DONE!")
if __name__ == "__main__":
#crawler()
new_parse("BlackPyramid", baseURL, False)

+21 -15  MarketPlaces/BlackPyramid/parser.py

@ -43,6 +43,12 @@ def blackpyramid_description_parser(soup):
name = name.replace(",", "")
name = name.strip()
# Finding Product Rating
rating_span = soup.find('span', {'class': 'to3098503t'}).find_next_sibling('span')
rating_num = rating_span.find('b').text
if rating_num != 'N/A':
rating_item = rating_num[0:3]
# product description
describe = soup.findAll('div', {'class': 'fer048953'})[1].text
describe = describe.replace('\n', ' ')
@ -57,11 +63,11 @@ def blackpyramid_description_parser(soup):
vendor = vendor.replace(",", "")
vendor = vendor.strip()
# Finding Vendor Rating
rating_span = soup.find('span', {'class': 'to3098503t'}).find_next_sibling('span')
rating_num = rating_span.find('b').text
if rating_num != 'N/A':
rating = rating_num[0:3]
# Finding Product Rating
rating_div = soup.find('div', {'class': 'bold03905 vstat364'}).find_next_sibling('div').find_next_sibling('div')
rating_vendor = cleanNumbers(rating_div.text)
if rating_vendor == "":
rating_vendor = "-1"
# Finding Successful Transactions
success_container = soup.find('ul', {'class': 'ul3o00953'}).findAll('li')[1]
@ -102,7 +108,7 @@ def blackpyramid_description_parser(soup):
positive = soup.find('span', {'class': 'ar04999324'}).text
neutral = soup.find('span', {'class': 'ti9400005 can39953'}).text
negative = soup.find('span', {'class': 'ti9400005 ti90088 can39953'}).text
review = int(positive) + int(neutral) + int(negative)
reviews = int(positive) + int(neutral) + int(negative)
# Finding product image
image = soup.find('img', {'class': 'img0390503'})
@ -147,7 +153,7 @@ def blackpyramid_listing_parser(soup):
# Fields to be parsed
nm = 0 # *Total_Products (Should be Integer)
mktName = "Black Pyramid" # 0 *Marketplace_Name
mktName = "BlackPyramid" # 0 *Marketplace_Name
vendor = [] # 1 *Vendor y
rating_vendor = [] # 2 Vendor_Rating
success = [] # 3 Vendor_Successful_Transactions
@ -196,14 +202,14 @@ def blackpyramid_listing_parser(soup):
product = product.strip()
name.append(product)
# Finding description
# 'recursive=False' only searches direct children
desc = card.findChildren('div', recursive=False)[0]
desc = desc.findAll('div', recursive=False)[3].text
desc = desc.replace('\n', ' ')
desc = desc.replace(",", "")
desc = desc.strip()
describe.append(desc)
# # Finding description
# # 'recursive=False' only searches direct children
# desc = card.findChildren('div', recursive=False)[0]
# desc = desc.findAll('div', recursive=False)[3].text
# desc = desc.replace('\n', ' ')
# desc = desc.replace(",", "")
# desc = desc.strip()
# describe.append(desc)
# Finding Vendor Name
vendor_name = bae[4].find('span').text


+1 -1  MarketPlaces/Bohemia/crawler_selenium.py

@ -42,7 +42,7 @@ def startCrawling():
print(driver.current_url, e)
closeDriver(driver)
new_parse(mktName, False)
new_parse(marketPlace=mktName, url=baseURL, createLog=False)
def login(driver):


+20 -14  MarketPlaces/CityMarket/crawler_selenium.py

@ -144,6 +144,7 @@ def login(driver):
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, '//*[@id="collapse3"]')))
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
cleanPage = cleanHTML(driver, page)
@ -186,10 +187,18 @@ def getInterestedLinks():
links = []
# # Hire hacker
links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=3')
# # ddos
# links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=3')
# # other
# links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=14')
# malware
links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=15')
# ddos
links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=16')
# # hacking service
# software
links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=17')
# botnet
links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=18')
# hacking service
links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=31')
return links
@ -217,7 +226,7 @@ def crawlForum(driver):
except:
driver.refresh()
html = driver.page_source
savePage(driver, html, link)
savePage(driver, html, linksToCrawl[i] + f"page{count+1}")
list = productPages(html)
for item in list:
@ -230,18 +239,15 @@ def crawlForum(driver):
savePage(driver, driver.page_source, item)
driver.back()
# comment out
# break
# comment out
"""count += 1
if count == 1:
break"""
# # comment out
# break
#
# # comment out
# if count == 1:
# break
try:
#link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/div[2]/nav/ul/li[5]/a').get_attribute('href')
link = driver.find_element(by=By.XPATH,
value='//a[@rel="next"]').get_attribute('href')
link = driver.find_element(by=By.XPATH, value='//a[@rel="next"]').get_attribute('href')
if link == "":
raise NoSuchElementException
count += 1


+13 -23  MarketPlaces/CityMarket/parser.py

@ -57,19 +57,6 @@ def city_description_parser(soup):
describe = soup.find('div', {'class': "text-white"}).text
describe = cleanString(describe.strip())
'''# Finding the Number of Product Reviews
tag = soup.findAll(text=re.compile('Reviews'))
for index in tag:
reviews = index
par = reviews.find('(')
if par >=0:
reviews = reviews.replace("Reviews (","")
reviews = reviews.replace(")","")
reviews = reviews.split(",")
review = str(abs(int(reviews[0])) + abs(int(reviews[1])))
else :
review = "-1"'''
# Searching for CVE and MS categories
cve = soup.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
if cve:
@ -138,7 +125,6 @@ def city_listing_parser(soup):
# Adding the url to the list of urls
link = bae[0].get('href')
link = cleanLink(link)
href.append(link)
# Category
@ -156,15 +142,19 @@ def city_listing_parser(soup):
# USD and BTC Price
price = a.find('div', {"class": "price"}).text
tempUSD = price.split("~")[0]
tempUSD = tempUSD.replace("$", "")
tempUSD = tempUSD.strip()
USD.append(tempUSD)
tempBTC = price.split("~")[1]
tempBTC = tempBTC.replace("BTC", "")
tempBTC = tempBTC.strip()
BTC.append(tempBTC)
if "~" in price:
tempUSD = price.split("~")[0]
tempUSD = tempUSD.replace("$", "")
tempUSD = tempUSD.strip()
USD.append(tempUSD)
tempBTC = price.split("~")[1]
tempBTC = tempBTC.replace("BTC", "")
tempBTC = tempBTC.strip()
BTC.append(tempBTC)
else:
USD.append("-1")
BTC.append("-1")
# Img
product_image = a.find('img')
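The new guard only splits prices that actually contain "~"; listings without a BTC conversion now record "-1" instead of raising an IndexError. The same logic exercised standalone (sample strings invented):

def splitPrice(price):
    # mirrors the listing-parser logic for "$100 ~ 0.0042 BTC" style strings
    if "~" in price:
        usd = price.split("~")[0].replace("$", "").strip()
        btc = price.split("~")[1].replace("BTC", "").strip()
        return usd, btc
    return "-1", "-1"

print(splitPrice("$100 ~ 0.0042 BTC"))  # ('100', '0.0042')
print(splitPrice("Contact vendor"))     # ('-1', '-1')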


+7 -6  MarketPlaces/CypherMarketplace/crawler_selenium.py

@ -144,6 +144,7 @@ def login(driver):
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, '//input[@name="search"]')))
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
cleanPage = cleanHTML(driver, page)
@ -237,12 +238,12 @@ def crawlForum(driver):
savePage(driver, driver.page_source, item)
driver.back()
# comment out
break
# comment out
if count == 1:
break
# # comment out
# break
#
# # comment out
# if count == 1:
# break
try:
# temp = driver.find_element(by=By.XPATH, value=


+2 -2  MarketPlaces/CypherMarketplace/parser.py

@ -281,10 +281,10 @@ def cyphermarketplace_links_parser(soup):
# Returning all links that should be visited by the Crawler
href = []
listing = soup.findAll('div', {"class": "card-body"})
listing = soup.findAll('div', {"class": "col-12 col-sm-6 col-md-4 my-1"})
for a in listing:
bae = a.find('a', {"class": "text-info"}, href=True)
bae = a.find('div', {"class": "card-body"}).find('a', {"class": "text-info"}, href=True)
link = bae['href']
href.append(link)
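The selector now anchors on the column wrapper and digs into the card body for the link. Exercised standalone against a minimal snippet (HTML invented to match those class names):

from bs4 import BeautifulSoup

html = '''
<div class="col-12 col-sm-6 col-md-4 my-1">
  <div class="card-body"><a class="text-info" href="/product/123">Item</a></div>
</div>
'''
soup = BeautifulSoup(html, "html.parser")
for a in soup.findAll('div', {"class": "col-12 col-sm-6 col-md-4 my-1"}):
    bae = a.find('div', {"class": "card-body"}).find('a', {"class": "text-info"}, href=True)
    print(bae['href'])  # /product/123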

+1 -1  MarketPlaces/DB_Connection/db_connection.py

@ -437,7 +437,7 @@ def create_database(cur, con):
cur.execute(sql)
sql = "Create table marketplaces_status (market_id integer NOT NULL, date_inserted date NOT NULL, " \
"listings integer NOT NULL, descriptions integer NOT NULL, status bit(1) NOT NULL, date_reference date NOT NULL " \
"listings integer NOT NULL, descriptions integer NOT NULL, status bit(1) NOT NULL, date_reference date NOT NULL, " \
"CONSTRAINT marketplaces_log_pkey PRIMARY KEY (market_id, date_inserted), " \
"CONSTRAINT marketplaces_fk FOREIGN KEY (market_id) REFERENCES marketplaces (market_id))"
cur.execute(sql)
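The one-character fix above adds the comma that separates the last column from the table constraints; without it, PostgreSQL parses "CONSTRAINT marketplaces_log_pkey ..." as part of the date_reference column definition and rejects the statement, since a column-level constraint cannot take a column list. A quick way to check the DDL in isolation (connection details hypothetical):

import psycopg2

con = psycopg2.connect(host="localhost", dbname="postgres", user="postgres", password="postgres")  # hypothetical credentials
cur = con.cursor()
cur.execute("CREATE TABLE marketplaces (market_id integer PRIMARY KEY)")  # minimal referenced table
cur.execute("Create table marketplaces_status (market_id integer NOT NULL, date_inserted date NOT NULL, "
            "listings integer NOT NULL, descriptions integer NOT NULL, status bit(1) NOT NULL, date_reference date NOT NULL, "
            "CONSTRAINT marketplaces_log_pkey PRIMARY KEY (market_id, date_inserted), "
            "CONSTRAINT marketplaces_fk FOREIGN KEY (market_id) REFERENCES marketplaces (market_id))")
con.commit()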


+19 -0  MarketPlaces/Initialization/markets_mining.py

@ -14,7 +14,14 @@ from MarketPlaces.M00nkeyMarket.crawler_selenium import crawler as crawlerM00nke
from MarketPlaces.ViceCity.crawler_selenium import crawler as crawlerViceCity
from MarketPlaces.CypherMarketplace.crawler_selenium import crawler as crawlerCypher
from MarketPlaces.PabloEscobarMarket.crawler_selenium import crawler as crawlerPabloEscobar
from MarketPlaces.DarkBazar.crawler_selenium import crawler as crawlerDarkBazar
from MarketPlaces.Sonanza.crawler_selenium import crawler as crawlerSonanza
from MarketPlaces.Kingdom.crawler_selenium import crawler as crawlerKingdom
from MarketPlaces.BlackPyramid.crawler_selenium import crawler as crawlerBlackPyramid
from MarketPlaces.Quest.crawler_selenium import crawler as crawlerQuest
from MarketPlaces.Ares.crawler_selenium import crawler as crawlerAres
from MarketPlaces.Bohemia.crawler_selenium import crawler as crawlerBohemia
from MarketPlaces.TheDarkMarket.crawler_selenium import crawler as crawlerTheDarkMarket
from MarketPlaces.GoFish.crawler_selenium import crawler as crawlerGoFish
import configparser
@ -109,9 +116,21 @@ if __name__ == '__main__':
crawlerCypher()
elif mkt == "PabloEscobarMarket":
crawlerPabloEscobar()
elif mkt == "DarkBazar":
crawlerDarkBazar()
elif mkt == "Sonanza":
crawlerSonanza()
elif mkt == "Kingdom":
crawlerKingdom()
elif mkt == "BlackPyramid":
crawlerBlackPyramid()
elif mkt == "Quest":
crawlerQuest()
elif mkt == "Ares":
crawlerAres()
elif mkt == "GoFish":
crawlerGoFish()
elif mkt == "TheDarkMarket":
crawlerTheDarkMarket()
print("\nScraping process completed!")

+53 -65  MarketPlaces/MikesGrandStore/crawler_selenium.py

@ -1,7 +1,7 @@
__author__ = 'Helium'
__author__ = 'cern'
'''
Mikes Grand Store Crawler (Selenium)
MikesGrandStore Marketplace Crawler (Selenium)
'''
from selenium import webdriver
@ -10,6 +10,7 @@ from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
@ -21,22 +22,21 @@ import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.MikesGrandStore.parser import mikesgrandstore_links_parser
from MarketPlaces.MikesGrandStore.parser import MikesGrandStore_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML
counter = 1
baseURL = 'http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/'
baseURL = 'http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion'
# Opens Tor Browser, crawls the website, then parses, then closes tor
#acts like the main method for the crawler, another function at the end of this code calls this function later
def startCrawling():
mktName = getMKTName()
driver = getAccess()
if driver != 'down':
try:
login(driver)
# Login not needed in MikesGrandStore
# login(driver)
crawlForum(driver)
except Exception as e:
print(driver.current_url, e)
@ -46,21 +46,18 @@ def startCrawling():
# Returns the name of the website
#return: name of site in string type
def getMKTName():
name = 'MikesGrandStore'
return name
# Return the base link of the website
#return: url of base site in string type
def getFixedURL():
url = 'http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/'
url = 'http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion'
return url
# Closes Tor Browser
#@param: current selenium driver
def closeDriver(driver):
# global pid
# os.system("taskkill /pid " + str(pro.pid))
@ -86,8 +83,8 @@ def createFFDriver():
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
ff_prof.set_preference("signon.rememberSignons", False)
ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
ff_prof.set_preference("network.dns.disablePrefetch", True)
ff_prof.set_preference("network.http.sendRefererHeader", 0)
# ff_prof.set_preference("network.dns.disablePrefetch", True)
# ff_prof.set_preference("network.http.sendRefererHeader", 0)
ff_prof.set_preference("permissions.default.image", 3)
ff_prof.set_preference("browser.download.folderList", 2)
ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
@ -110,7 +107,6 @@ def createFFDriver():
#the driver 'gets' the url, attempting to get on the site, if it can't access return 'down'
#return: return the selenium driver or string 'down'
def getAccess():
url = getFixedURL()
driver = createFFDriver()
@ -122,16 +118,27 @@ def getAccess():
return 'down'
# Manual captcha solver: waits for a specific element so that the whole page loads, finds the input box, gets a screenshot of the captcha,
# then allows for manual solving of the captcha in the terminal
#@param: current selenium web driver
def login(driver):
# wait for page to show up (This Xpath may need to change based on different seed url)
input("Press ENTER when CAPTCHA is complete and login page has loaded\n")
# entering username and password into input boxes
usernameBox = driver.find_element(by=By.XPATH, value='//input[@name="username"]')
# Username here
usernameBox.send_keys('aliciamykeys')
passwordBox = driver.find_element(by=By.XPATH, value='//input[@name="password"]')
# Password here
passwordBox.send_keys('aliciawherearemykey$')
# session time
session_select = Select(driver.find_element(by=By.XPATH, value='/html/body/main/div/div/div/div/div/form/div[4]/div/div[2]/select'))
session_select.select_by_visible_text('Session 60min')
input("Press ENTER when CAPTCHA is completed and you exit the newsletter\n")
# wait for listing page show up (This Xpath may need to change based on different seed url)
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div[1]/header/div/div[3]/div/div/ul/li[6]/a")))
(By.XPATH, '//*[@id="submit"]')))
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
cleanPage = cleanHTML(driver, page)
filePath = getFullPathName(url)
@ -140,50 +147,43 @@ def savePage(driver, page, url):
return
# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
fileName = getNameFromURL(url)
if isDescriptionLink(url):
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
else:
if isListingLink(url):
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
else:
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
return fullPath
# Creates the file name from passed URL, gives distinct name if can't be made unique after cleaned
#@param: raw url as crawler crawls through every site
def getMKTName() -> str:
name = 'MikesGrandStore'
return name
def getNameFromURL(url):
global counter
name = ''.join(e for e in url if e.isalnum())
if (name == ''):
if name == '':
name = str(counter)
counter = counter + 1
return name
# returns list of urls, here is where you can list the different urls of interest, the crawler runs through this list
#in this example, there are a couple of categories some threads fall under such as
# Guides and Tutorials, Digital Products, and Software and Malware
#as you can see they are categories of products
def getInterestedLinks():
links = []
# Hacking and DDOS
# Hacking
links.append('http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/product-category/hacking/')
# # databases
# links.append('http://4yx2akutmkhwfgzlpdxiah7cknurw6vlddlq24fxa3r3ebophwgpvhyd.onion/product-category/databases/')
return links
# gets links of interest to crawl through, iterates through list, where each link is clicked and crawled through
#topic and description pages are crawled through here, where both types of pages are saved
#@param: selenium driver
def crawlForum(driver):
print("Crawling the MikesGrandStore market")
linksToCrawl = getInterestedLinks()
@ -205,6 +205,7 @@ def crawlForum(driver):
savePage(driver, html, link)
list = productPages(html)
for item in list:
itemURL = urlparse.urljoin(baseURL, str(item))
try:
@ -215,15 +216,15 @@ def crawlForum(driver):
driver.back()
# comment out
break
#break
# comment out
if count == 1:
break
#if count == 1:
# break
# go to next page
try:
link = driver.find_element(by=By.XPATH, value=
'/html/body/div[1]/main/div/div[1]/div/div[3]/nav/ul/li[6]/a').get_attribute('href')
link = driver.find_element(by=By.XPATH, value="//a[@class='next page-number']").get_attribute('href')
if link == "":
raise NoSuchElementException
count += 1
@ -238,41 +239,28 @@ def crawlForum(driver):
print("Crawling the MikesGrandStore market done.")
# Returns 'True' if the link is a description link
#@param: url of any url crawled
#return: true if is a description page, false if not
# Returns 'True' if the link is Topic link, may need to change for every website
def isDescriptionLink(url):
if 'product/' in url:
if 'item' in url:
return True
return False
# Returns True if the link is a listingPage link
#@param: url of any url crawled
#return: true if is a Listing page, false if not
# Returns True if the link is a listingPage link, may need to change for every website
def isListingLink(url):
if 'product-category' in url:
if 'category' in url:
return True
return False
# calling the parser to define the links, the html is the url of a link from the list of interested link list
#@param: link from interested link list ie. getInterestingLinks()
#return: list of description links that should be crawled through
def productPages(html):
soup = BeautifulSoup(html, "html.parser")
return mikesgrandstore_links_parser(soup)
# Drop links that "signout"
# def isSignOut(url):
# #absURL = urlparse.urljoin(url.base_url, url.url)
# if 'signout' in url.lower() or 'logout' in url.lower():
# return True
#
# return False
return MikesGrandStore_links_parser(soup)
def crawler():
startCrawling()
# print("Crawling and Parsing BestCardingWorld .... DONE!")
if __name__ == '__main__':
startCrawling()
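The pagination lookup was switched from a brittle absolute XPath to an attribute-based one. The same idea as a small reusable helper (a sketch, not code from the repo):

from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By

def nextPageLink(driver):
    # returns the href of the "next" pagination link, or None when on the last page
    try:
        link = driver.find_element(by=By.XPATH, value="//a[@class='next page-number']").get_attribute('href')
        return link or None
    except NoSuchElementException:
        return None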

+194 -199  MarketPlaces/MikesGrandStore/parser.py

@ -1,223 +1,211 @@
__author__ = 'DarkWeb'
# Here, we are importing the auxiliary functions to clean or convert data
from typing import List, Tuple
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
def mikesGrandStore_description_parser(soup: BeautifulSoup) -> Tuple:
name = "-1" # 0 Product_Name
describe = "-1" # 1 Product_Description
lastSeen = "-1" # 2 Product_LastViewDate
rules = "-1" # 3 NOT USED ...
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = "-1" # 5 Product_MS_Classification (Microsoft Security)
review = "-1" # 6 Product_Number_Of_Reviews
# parses description pages, so takes html pages of description pages using soup object, and parses it for info it needs
# stores info it needs in different lists, these lists are returned after being organized
# @param: soup object looking at html page of description page
# return: 'row' that contains a variety of lists that each hold info on the description page
def MikesGrandStore_description_parser(soup):
# Fields to be parsed
vendor = "-1" # 0 *Vendor_Name
success = "-1" # 1 Vendor_Successful_Transactions
rating_vendor = "-1" # 2 Vendor_Rating
name = "-1" # 3 *Product_Name
describe = "-1" # 4 Product_Description
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = "-1" # 6 Product_MS_Classification (Microsoft Security)
category = "-1" # 7 Product_Category
shipFrom = "-1" # 8 Product_ShippedFrom
shipTo = "-1" # 9 Product_ShippedTo
left = "-1" # 10 Product_QuantityLeft
escrow = "-1" # 11 Vendor_Warranty
terms = "-1" # 12 Vendor_TermsAndConditions
vendor = "-1" # 13 Vendor_Name
sold = "-1" # 14 Product_QuantitySold
addDate = "-1" # 15 Product_AddedDate
available = "-1" # 16 NOT USED ...
endDate = "-1" # 17 NOT USED ...
BTC = "-1" # 18 Product_BTC_SellingPrice
USD = "-1" # 19 Product_USD_SellingPrice
rating = "-1" # 20 Vendor_Rating
success = "-1" # 21 Vendor_Successful_Transactions
EURO = "-1" # 22 Product_EURO_SellingPrice
name: str = soup.find("h1", {"class": "product-title product_title entry-title"}).text
describe = soup.find("div", {"id": "tab-description"}).text
commentsList: List[BeautifulSoup] = soup.find("ol", {"class": "commentlist"}).find_all("li")
if len(commentsList) > 0:
lastReview: BeautifulSoup = commentsList[0]
lastSeen = lastReview.find("time").get("datetime").text
reviewTab: str = soup.find('a', {'href': '#tab-reivews'}).text
review = reviewTab.split('(')[1].split(')')[0]
navbarBreadcrumbs: List[BeautifulSoup] = soup.find('nav', {'class': 'woocommerce-breadcrumb breadcrumbs '}).find_all('a')
category = navbarBreadcrumbs[1].text
USD = soup.find("div", {"class": "price-wrapper"}).text
reviewStats: str = soup.find("div", {"class": "star-rating"}).text
rating = reviewStats.split(' ')[1]
row = (
name,
describe,
lastSeen,
rules,
CVE,
MS,
review,
category,
shipFrom,
shipTo,
left,
escrow,
terms,
vendor,
sold,
addDate,
available,
endDate,
BTC,
USD,
rating,
success,
EURO
)
views = "-1" # 8 Product_Number_Of_Views
reviews = "-1" # 9 Product_Number_Of_Reviews
rating_item = "-1" # 10 Product_Rating
addDate = "-1" # 11 Product_AddedDate
BTC = "-1" # 12 Product_BTC_SellingPrice
USD = "-1" # 13 Product_USD_SellingPrice
EURO = "-1" # 14 Product_EURO_SellingPrice
sold = "-1" # 15 Product_QuantitySold
left = "-1" # 16 Product_QuantityLeft
shipFrom = "-1" # 17 Product_ShippedFrom
shipTo = "-1" # 18 Product_ShippedTo
image = "-1" # 19 Product_Image
vendor_image = "-1" # 20 Vendor_Image
# Finding Product Name
name = soup.find('h1', {'class': 'product-title product_title entry-title'}).text
name = name.replace('\n', ' ')
name = name.replace(",", "")
name = name.strip()
divmb = soup.findAll('div', {'class': "mb-1"})
# Finding Vendor
# no vendor
vendor = "MikesGrandStore"
# Finding the Product Rating
rating_item = soup.find('strong', {'class', 'rating'}).text
rating_item = rating_item.replace('\n', ' ')
rating_item = rating_item.replace(",", "")
rating_item = rating_item.strip()
# Finding Number of Product Reviews
review_container = soup.find('li', {'id': 'tab-title-reviews'})
reviews = review_container.find('a').text
reviews = reviews.replace('Reviews', '')
reviews = reviews.replace('(', '')
reviews = reviews.replace(')', '')
reviews = reviews.replace('\n', ' ')
reviews = reviews.replace(",", "")
reviews = reviews.strip()
# Finding Prices
USD = soup.find('span', {'class': 'woocommerce-Price-currencySymbol'}).next_sibling
USD = USD.replace('\n', ' ')
USD = USD.replace(",", "")
USD = USD.strip()
# Finding the Product Category
cat_container = soup.find('span', {'class': 'posted_in'})
cat = cat_container.findAll('a')
category = ""
for name in cat:
category = category + " " + name.text
# Finding the Product Quantity Available
stock = soup.find('p', {'class': 'stock in-stock'})
if stock is not None:
left = stock.text
left = left.replace("in stock", "")
left = left.strip()
# Finding the Product description
desc_cont = soup.find('div', {'class': 'product-short-description'})
describe = desc_cont.find('p').text.strip()
# Finding Product Image
image = soup.find('img', {'class': 'wp-post-image skip-lazy'})
image = image.get('src')
image = image.split('base64,')[-1]
# Searching for CVE and MS categories
cve = soup.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
if cve:
CVE = " "
for idx in cve:
CVE += (idx)
CVE += " "
CVE = CVE.replace(',', ' ')
CVE = CVE.replace('\n', '')
ms = soup.findAll(text=re.compile('MS\d{2}-\d{3}'))
if ms:
MS = " "
for im in ms:
MS += (im)
MS += " "
MS = MS.replace(',', ' ')
MS = MS.replace('\n', '')
# Populating the final variable (this should be a list with all fields scraped)
row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)
# Sending the results
return row
def mikesGrandStore_listing_parser(soup: BeautifulSoup) -> List:
# Fields to be parsed
nm = 0 # Total_Products (Should be Integer)
mktName = "MikesGrandStore" # 0 Marketplace_Name
name = [] # 1 Product_Name
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = [] # 3 Product_MS_Classification (Microsoft Security)
category = [] # 4 Product_Category
describe = [] # 5 Product_Description
escrow = [] # 6 Vendor_Warranty
views = [] # 7 Product_Number_Of_Views
reviews = [] # 8 Product_Number_Of_Reviews
addDate = [] # 9 Product_AddDate
lastSeen = [] # 10 Product_LastViewDate
BTC = [] # 11 Product_BTC_SellingPrice
USD = [] # 12 Product_USD_SellingPrice
EURO = [] # 13 Product_EURO_SellingPrice
sold = [] # 14 Product_QuantitySold
qLeft =[] # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom
shipTo = [] # 17 Product_ShippedTo
vendor = [] # 18 Vendor
rating = [] # 19 Vendor_Rating
success = [] # 20 Vendor_Successful_Transactions
href = [] # 23 Product_Links (Urls)
pass
#parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs
#stores info it needs in different lists, these lists are returned after being organized
#@param: soup object looking at html page of listing page
#return: 'row' that contains a variety of lists that each hold info on the listing page
def darkfox_listing_parser(soup):
# parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs
# stores info it needs in different lists, these lists are returned after being organized
# @param: soup object looking at html page of listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def MikesGrandStore_listing_parser(soup):
# Fields to be parsed
nm = 0 # Total_Products (Should be Integer)
mktName = "DarkFox" # 0 Marketplace_Name
name = [] # 1 Product_Name
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = [] # 3 Product_MS_Classification (Microsoft Security)
category = [] # 4 Product_Category
describe = [] # 5 Product_Description
escrow = [] # 6 Vendor_Warranty
views = [] # 7 Product_Number_Of_Views
reviews = [] # 8 Product_Number_Of_Reviews
addDate = [] # 9 Product_AddDate
lastSeen = [] # 10 Product_LastViewDate
BTC = [] # 11 Product_BTC_SellingPrice
USD = [] # 12 Product_USD_SellingPrice
EURO = [] # 13 Product_EURO_SellingPrice
sold = [] # 14 Product_QuantitySold
qLeft =[] # 15 Product_QuantityLeft
shipFrom = [] # 16 Product_ShippedFrom
shipTo = [] # 17 Product_ShippedTo
vendor = [] # 18 Vendor
rating = [] # 19 Vendor_Rating
success = [] # 20 Vendor_Successful_Transactions
href = [] # 23 Product_Links (Urls)
listing = soup.findAll('div', {"class": "card"})
nm = 0 # *Total_Products (Should be Integer)
mktName = "MikesGrandStore" # 0 *Marketplace_Name
vendor = [] # 1 *Vendor y
rating_vendor = [] # 2 Vendor_Rating
success = [] # 3 Vendor_Successful_Transactions
name = [] # 4 *Product_Name y
CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) dont worry about this
MS = [] # 6 Product_MS_Classification (Microsoft Security) dont worry about this
category = [] # 7 Product_Category y
describe = [] # 8 Product_Description
views = [] # 9 Product_Number_Of_Views
reviews = [] # 10 Product_Number_Of_Reviews
rating_item = [] # 11 Product_Rating
addDate = [] # 12 Product_AddDate
BTC = [] # 13 Product_BTC_SellingPrice
USD = [] # 14 Product_USD_SellingPrice y
EURO = [] # 15 Product_EURO_SellingPrice
sold = [] # 16 Product_QuantitySold
qLeft = [] # 17 Product_QuantityLeft
shipFrom = [] # 18 Product_ShippedFrom
shipTo = [] # 19 Product_ShippedTo
image = [] # 20 Product_Image
image_vendor = [] # 21 Vendor_Image
href = [] # 22 Product_Links
listing_container = soup.find('div', {'class': 'products row row-small large-columns-3 medium-columns-3 small-columns-2 equalize-box'})
listing = listing_container.findAll('div', recursive=False)
# Populating the Number of Products
nm = len(listing)
for a in listing:
bae = a.findAll('a', href=True)
lb = a.findAll('div', {"id": "littlebox"})
# Adding the url to the list of urls
link = bae[0].get('href')
link = cleanLink(link)
link = a.find('a', {'class': 'woocommerce-LoopProduct-link woocommerce-loop-product__link'}).get('href')
href.append(link)
# Finding the Product
product = bae[1].find('p').text
product = a.find('a', {'class': 'woocommerce-LoopProduct-link woocommerce-loop-product__link'}).text
product = product.replace('\n', ' ')
product = product.replace(",", "")
product = product.replace("...", "")
product = product.strip()
name.append(product)
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div')
if len(bae) >= 5:
# Finding Prices
price = bae[0].text
ud = price.replace(" USD", " ")
# u = ud.replace("$","")
u = ud.replace(",", "")
u = u.strip()
USD.append(u)
# bc = (prc[1]).strip(' BTC')
# BTC.append(bc)
# Finding the Vendor
vendor_name = bae[1].find('a').text
vendor_name = vendor_name.replace(",", "")
vendor_name = vendor_name.strip()
vendor.append(vendor_name)
# Finding the Category
cat = bae[2].find('small').text
cat = cat.replace("Category: ", "")
cat = cat.replace(",", "")
cat = cat.strip()
category.append(cat)
# Finding Number Sold and Quantity Left
num = bae[3].text
num = num.replace("Sold: ", "")
num = num.strip()
sold.append(num)
quant = bae[4].find('small').text
quant = quant.replace("In stock: ", "")
quant = quant.strip()
qLeft.append(quant)
# Finding Successful Transactions
freq = bae[1].text
freq = freq.replace(vendor_name, "")
freq = re.sub(r'Vendor Level \d+', "", freq)
freq = freq.replace("(", "")
freq = freq.replace(")", "")
freq = freq.strip()
success.append(freq)
# Finding Product Image
product_image = a.find('img', {'class': 'attachment-woocommerce_thumbnail size-woocommerce_thumbnail'})
product_image = product_image.get('src')
product_image = product_image.split('base64,')[-1]
image.append(product_image)
# Finding Prices
price = a.find('span', {'class': 'woocommerce-Price-currencySymbol'}).next_sibling
price = price.strip()
USD.append(price)
# Finding the Vendor
vendor_name = "MikesGrandStore"
vendor.append(vendor_name)
image_vendor.append("-1")
# Finding the Category
cat = a.find('p', {'class': 'category uppercase is-smaller no-text-overflow product-cat op-7'}).text
cat = cat.replace("class:", "")
cat = cat.strip()
category.append(cat)
# Finding product rating
rating = a.find('strong', {'class': 'rating'}).text
rating = rating.strip()
rating_item.append(rating)
# Searching for CVE and MS categories
cve = a.findAll(text=re.compile('CVE-\d{4}-\d{4}'))
if not cve:
cveValue="-1"
cveValue = "-1"
else:
cee = " "
for idx in cve:
@ -225,12 +213,12 @@ def darkfox_listing_parser(soup):
cee += " "
cee = cee.replace(',', ' ')
cee = cee.replace('\n', '')
cveValue=cee
cveValue = cee
CVE.append(cveValue)
ms = a.findAll(text=re.compile('MS\d{2}-\d{3}'))
if not ms:
MSValue="-1"
MSValue = "-1"
else:
me = " "
for im in ms:
@ -238,27 +226,34 @@ def darkfox_listing_parser(soup):
me += " "
me = me.replace(',', ' ')
me = me.replace('\n', '')
MSValue=me
MSValue = me
MS.append(MSValue)
# Populate the final variable (this should be a list with all fields scraped)
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen,
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href)
return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image, image_vendor)
#called by the crawler to get description links on a listing page
#@param: beautifulsoup object that is using the correct html page (listing page)
#return: list of description links from a listing page
def mikesgrandstore_links_parser(soup):
# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def MikesGrandStore_links_parser(soup):
# Returning all links that should be visited by the Crawler
href = []
listing = soup.findAll('div', {"class": "box-image"})
container = soup.find('div', {"class": "products row row-small large-columns-3 medium-columns-3 small-columns-2 equalize-box"})
listing = container.findAll('div', recursive=False)
# for a in listing:
# bae = a.find('a', {"class": "text-info"}, href=True)
# link = bae['href']
# href.append(link)
for a in listing:
bae = a.find('div', {"class": "image-fade_in_back"}).find('a', href=True)
link = bae['href']
bae = a.findAll('a', href=True)
# Adding the url to the list of urls
link = bae[0].get('href')
href.append(link)
return href
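Product images are kept as the base64 payload only (the split('base64,')[-1] above strips the data-URI prefix). Decoding one back to an image file is a short round-trip (file names hypothetical):

import base64

with open("product_image.b64") as f:          # hypothetical dump of the stored payload
    raw = base64.b64decode(f.read())
with open("product_image.jpg", "wb") as out:  # extension depends on the original image
    out.write(raw)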

+352 -0  MarketPlaces/TheDarkMarket/crawler_selenium.py

@ -0,0 +1,352 @@
__author__ = 'DarkWeb'

'''
TheDarkMarket Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.TheDarkMarket.parser import darkmarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://dark3xolguutzr2cn5twjyu6c3db2z3ai3aqyqascml5cdrleh3s2hqd.onion/'


# Opens Tor Browser, crawls the website
def startCrawling():
    marketName = getMarketName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(marketPlace=marketName, url=baseURL, createLog=True)


def captcha(driver):
    '''
    # wait for captcha page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div/div/div/form/div/div[2]/button")))

    inputChars = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div/div/form/div/div[2]/div[1]/input')
    inputNum = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div/div/form/div/div[2]/div[2]/input')

    driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div/div/form/div/div[1]/div/div').screenshot(
        r'..\Royal\captcha1.png')

    im = Image.open(r'..\Royal\captcha1.png')
    im.show()

    chars = input("Enter characters: ")
    inputChars.send_keys(chars)

    num = input("Enter number of wrong puzzle pieces: ")
    inputNum.send_keys(num)

    # click the verify(submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div[2]/div/div/div/div/form/div/div[2]/button").click()
    '''
    input("Press ENTER when CAPTCHA is completed\n")

    # wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div/div[2]/h1")))

    '''
    temp = driver.find_element(by=By.XPATH, value='/html/body/div/div/form/div[1]')
    boxes = temp.find_elements(by=By.TAG_NAME, value='input')

    for box in boxes:
        # click box to update captcha image
        box.click()

        # save clock captcha to local
        time.sleep(1)
        driver.find_element(by=By.XPATH, value='/html/body/div/div/form/div[1]/div').screenshot(
            r'..\Royal\captcha1.png')

        im = Image.open(r'..\Royal\captcha1.png')
        im.show()

        letter = input("Enter letter: ")
        box.send_keys(letter)

    # click the verify(submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/button[1]").click()

    # wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[1]/div/div/div[2]/form/input[3]")))
    '''


# Login using premade account credentials and do login captcha manually
def login(driver):
    # wait for login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div/div/div[2]/form/div[4]")))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('blabri')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('fishowal')

    # click "Login"
    driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div[2]/form/div[4]').click()

    '''
    # wait for captcha page show up
    time.sleep(3)

    # save captcha to local
    driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div[2]/form/div[4]/label/div/div').screenshot(
        r'..\Royal\captcha2.png')

    # This method will show image in any image viewer
    im = Image.open(r'..\Royal\captcha2.png')
    im.show()

    # ask user input captcha solution in terminal
    userIn = input("Enter location of wrong pieces (squares are numbered 1-24 left to right, # # #): ")
squares = userIn.split()
# send user solution into the input space
for id in squares:
driver.find_element(by=By.XPATH, value='//*[@id="cl[' + str((int(id)-1)) + ']"]').click()
# click the verify(submit) button
driver.find_element(by=By.XPATH, value="/html/body/div[2]/div/div/div[2]/form/div[4]/label/div/div/div/button").click()
'''
input("Press ENTER when CAPTCHA is completed\n")
# wait for the listing page to show up (this XPath may need to change for a different seed URL)
WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
(By.XPATH, '/html/body/div[3]/div/div[5]/div[1]')))
# Returns the name of the website
def getMarketName():
name = 'TheDarkMarket'
return name
# Return the link of the website
def getFixedURL():
url = 'http://dark3xolguutzr2cn5twjyu6c3db2z3ai3aqyqascml5cdrleh3s2hqd.onion/'
return url
# Closes Tor Browser
def closeDriver(driver):
# global pid
# os.system("taskkill /pid " + str(pro.pid))
# os.system("taskkill /t /f /im tor.exe")
print('Closing Tor...')
driver.close()
time.sleep(3)
return
# Creates a Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
from MarketPlaces.Initialization.markets_mining import config
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
ff_prof.set_preference("places.history.enabled", False)
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
ff_prof.set_preference("signon.rememberSignons", False)
ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
# ff_prof.set_preference("network.dns.disablePrefetch", True)
# ff_prof.set_preference("network.http.sendRefererHeader", 0)
ff_prof.set_preference("permissions.default.image", 3)
ff_prof.set_preference("browser.download.folderList", 2)
ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
ff_prof.set_preference('network.proxy.type', 1)
ff_prof.set_preference("network.proxy.socks_version", 5)
ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
ff_prof.set_preference('network.proxy.socks_port', 9150)
ff_prof.set_preference('network.proxy.socks_remote_dns', True)
ff_prof.set_preference("javascript.enabled", False)
ff_prof.update_preferences()
service = Service(config.get('TOR', 'geckodriver_path'))
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
driver.maximize_window()
return driver
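# Optional sanity check (an illustrative sketch, not part of the original pipeline):
# before crawling, one can confirm traffic really leaves through Tor by loading the
# public check.torproject.org page with the driver built above.
def verifyTorRouting(driver):
    driver.get('https://check.torproject.org/')
    if 'Congratulations' in driver.page_source:
        print('Tor routing confirmed')
    else:
        print('WARNING: traffic does not appear to be routed through Tor')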
def getAccess():
url = getFixedURL()
driver = createFFDriver()
try:
driver.get(url)
return driver
except:
driver.close()
return 'down'
# Saves the crawled html page
def savePage(driver, page, url):
cleanPage = cleanHTML(driver, page)
filePath = getFullPathName(url)
os.makedirs(os.path.dirname(filePath), exist_ok=True)
open(filePath, 'wb').write(cleanPage.encode('utf-8'))
return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMarketName() + "/HTML_Pages")
fileName = getNameFromURL(url)
if not isListingLink(url):
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
else:
fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
return fullPath
# Creates the file name from the passed URL
def getNameFromURL(url):
global counter
name = ''.join(e for e in url if e.isalnum())
if name == '':
name = str(counter)
counter = counter + 1
return name
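# Illustrative behavior of the helper above:
#   getNameFromURL('http://a.onion/p1')  ->  'httpaonionp1'
#   getNameFromURL('#@!')                ->  falls back to the global counter ('1', '2', ...)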
def getInterestedLinks():
links = []
# Digital - Fraud Software
links.append(baseURL + 'product-category/hacking/')
# # Digital - Guides and Tutorials
# links.append('http://royalrnpvfbodtt5altnnzano6hquvn2d5qy55oofc2zyqciogcevrad.onion/category/Guides%20&%20Tutorials')
# # Digital - Legitimate Software
# links.append('http://royalrnpvfbodtt5altnnzano6hquvn2d5qy55oofc2zyqciogcevrad.onion/category/Legitimiate%20Software')
# # Services - Carding
# links.append('http://royalrnpvfbodtt5altnnzano6hquvn2d5qy55oofc2zyqciogcevrad.onion/category/Carding')
return links
def crawlForum(driver):
print("Crawling The Dark Market")
linksToCrawl = getInterestedLinks()
i = 0
while i < len(linksToCrawl):
link = linksToCrawl[i]
print('Crawling :', link)
try:
has_next_page = True
count = 0
while has_next_page:
try:
driver.get(link)
except:
driver.refresh()
html = driver.page_source
savePage(driver, html, link)
list = productPages(html)
for item in list:
itemURL = urlparse.urljoin(baseURL, str(item))
try:
driver.get(itemURL)
except:
driver.refresh()
savePage(driver, driver.page_source, item)
driver.back()
# comment out
# break
# comment out
# if count == 1:
# break
# Try finding next page
try:
nav = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div/div/div[1]/div[2]/nav')
li = nav.find_elements(by=By.TAG_NAME, value='li')
a = li[-1].find_element(by=By.TAG_NAME, value='a')
link = a.get_attribute('href')
if link == "":
raise NoSuchElementException
count += 1
except NoSuchElementException:
has_next_page = False
except Exception as e:
print(link, e)
i += 1
input("Crawling Royal forum done sucessfully. Press ENTER to continue\n")
# Returns 'True' if the link is a description (product) link
def isDescriptionLink(url):
if '/product/' in url:
return True
return False
# Returns True if the link is a listingPage link
def isListingLink(url):
if 'category' in url:
return True
return False
# calls the parser to extract the description links from a listing page
def productPages(html):
soup = BeautifulSoup(html, "html.parser")
return darkmarket_links_parser(soup)
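# Minimal offline usage sketch (assumes a previously saved listing page on disk;
# the file name below is hypothetical):
#   with open('sample_listing.html', encoding='utf-8') as f:
#       for url in productPages(f.read()):
#           print(url)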
def crawler():
startCrawling()
# print("Crawling and Parsing BestCardingWorld .... DONE!")

+ 182
- 0
MarketPlaces/TheDarkMarket/parser.py View File

@ -0,0 +1,182 @@
__author__ = 'DarkWeb'
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup, ResultSet, Tag
# This is the method to parse the Description Pages (one page to each Product in the Listing Pages)
def darkmarket_description_parser(soup: BeautifulSoup):
# Fields to be parsed
vendor = "-1" # 0 *Vendor_Name
success = "-1" # 1 Vendor_Successful_Transactions
rating_vendor = "-1" # 2 Vendor_Rating
name = "-1" # 3 *Product_Name
describe = "-1" # 4 Product_Description
CVE = "-1" # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = "-1" # 6 Product_MS_Classification (Microsoft Security)
category = "-1" # 7 Product_Category
views = "-1" # 8 Product_Number_Of_Views
reviews = "-1" # 9 Product_Number_Of_Reviews
rating_item = "-1" # 10 Product_Rating
addDate = "-1" # 11 Product_AddedDate
BTC = "-1" # 12 Product_BTC_SellingPrice
USD = "-1" # 13 Product_USD_SellingPrice
EURO = "-1" # 14 Product_EURO_SellingPrice
sold = "-1" # 15 Product_QuantitySold
left = "-1" # 16 Product_QuantityLeft
shipFrom = "-1" # 17 Product_ShippedFrom
shipTo = "-1" # 18 Product_ShippedTo
image = "-1"
image_vendor = "-1"
details: Tag = soup.find("div", {"class": "wc-content"})
vendor = details.find("div", {"class": "product_meta"}).find("a", {"class": "wcvendors_cart_sold_by_meta"}).text
name = details.find("h1", {"class": "product_title entry-title"}).text
describe_list = [
elem.text for elem in
details.find("div", {"id": "tab-description"}).find_all()
if elem.name != "h2"
]
describe = " ".join(describe_list)
categories_list: ResultSet[Tag] = details.find("span", {"class": "posted_in"}).find_all("a")
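# NOTE: categories_list is collected but currently unused; the category below is hard-coded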
category = "Hacking"
reviews = details.find("div", {"class": "review-link"}).get("title")
rating_item = details.find("div", {"class": "star-rating"}).get('title')
price_container = details.find("p", {"class": "price"})
if not price_container.find("ins"):
USD = price_container.find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")
else:
USD = price_container.find("ins").find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")
# print(f"\n[desc] Product: {name}")
# print(f"[desc] Price: ${USD}\n")
# Populating the final variable (this should be a list with all fields scraped)
row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
BTC, USD, EURO, sold, left, shipFrom, shipTo, image, image_vendor)
# Sending the results
return row
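# The <ins>-aware price branches above follow WooCommerce sale markup: discounted
# products wrap the live price in an <ins> tag, regular products do not. A compact
# helper capturing the same pattern (an illustrative refactor, not original code):
def woocommerce_price(container: Tag) -> str:
    target = container.find("ins") or container
    return target.find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")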
# This is the method to parse the Listing Pages
def darkmarket_listing_parser(soup: BeautifulSoup):
# Fields to be parsed
nm = 0 # *Total_Products (Should be Integer)
mktName = "TheDarkMarket" # 0 *Marketplace_Name
vendor = [] # 1 *Vendor y
rating_vendor = [] # 2 Vendor_Rating
success = [] # 3 Vendor_Successful_Transactions
name = [] # 4 *Product_Name y
CVE = [] # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
MS = [] # 6 Product_MS_Classification (Microsoft Security)
category = [] # 7 Product_Category y
describe = [] # 8 Product_Description
views = [] # 9 Product_Number_Of_Views
reviews = [] # 10 Product_Number_Of_Reviews
rating_item = [] # 11 Product_Rating
addDate = [] # 12 Product_AddDate
BTC = [] # 13 Product_BTC_SellingPrice
USD = [] # 14 Product_USD_SellingPrice y
EURO = [] # 15 Product_EURO_SellingPrice
sold = [] # 16 Product_QuantitySold
qLeft =[] # 17 Product_QuantityLeft
shipFrom = [] # 18 Product_ShippedFrom
shipTo = [] # 19 Product_ShippedTo
image = []
image_vendor = []
href = [] # 20 Product_Links
products_list: ResultSet[Tag] = soup.find("ul", {"class": "products columns-3"}).find_all("li")
for product in products_list:
nm += 1
product_vendor = product.find("small", {"class": "wcvendors_sold_by_in_loop"}).find("a").text
vendor.append(cleanString(product_vendor))
# rating_vendor.append("-1")
# success.append("-1")
product_name = product.find("h2", {"class": "woocommerce-loop-product__title"}).text
name.append(cleanString(product_name))
# CVE.append("-1")
# MS.append("-1")
product_category = product.find("div", {"class": 'product-categories'}).text
category.append(cleanString(product_category))
# describe.append("-1")
# views.append("-1")
# reviews.append("-1")
product_rating = product.find("div", {"class": "star-rating"}).get("title")
rating_item.append(cleanString(product_rating))
# addDate.append(datetime.now().strftime("%m/%d/%Y "))
# BTC.append("-1")
price_container = product.find("span", {"class": "price"})
if not price_container.find("ins"):
product_price = price_container.find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")
else:
product_price = price_container.find("ins").find("span", {"class": "woocommerce-Price-amount amount"}).text.replace("$", "")
USD.append(cleanNumbers(product_price))
# EURO.append("-1")
# sold.append("-1")
# qLeft.append("-1")
# shipTo.append("-1")
# shipFrom.append("-1")
product_href = product.find("a", {"class": "woocommerce-LoopProduct-link woocommerce-loop-product__link"}).get("href")
href.append(product_href)
# print(f"\n[list] Product: {product_name}")
# print(f"[list] Links: ${product_href}\n")
product_images_list = product.find("a", {"class": "tf-loop-product-thumbs-link"}).find("img").get("data-srcset").split(" ")
product_image = product_images_list[0]
image.append(product_image)
# Populate the final variable (this should be a list with all fields scraped)
return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
image_vendor)
def darkmarket_links_parser(soup: BeautifulSoup):
# Returning all links that should be visited by the Crawler
href = []
listing: ResultSet[Tag] = soup.find("ul", {"class": "products columns-3"}).find_all("li")
for li in listing:
a = li.find('a', {"class": "woocommerce-LoopProduct-link woocommerce-loop-product__link"})
link = a.get('href')
href.append(link)
print(f"Links: {href}")
return href

+ 1
- 2
MarketPlaces/Utilities/utilities.py View File

@ -252,8 +252,7 @@ def organizeProducts(marketplace, nm, vendor, rating_vendor, success_vendor, nom
lne = marketplace # 0
lne += ","
# Added for CityMarket
lne += "=1" if len(vendor) == 0 else vendor[n] # 1
lne += "-1" if len(vendor) == 0 else vendor[n] # 1
lne += ","
lne += "-1" if len(rating_vendor) == 0 else rating_vendor[n] # 2
lne += ","


+ 20
- 25
MarketPlaces/WeTheNorth/crawler_selenium.py View File

@ -14,6 +14,8 @@ from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import codecs
import socks, socket, time
import urllib.parse as urlparse
import os, re, time
from datetime import date
@ -22,6 +24,7 @@ from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.WeTheNorth.parser import wethenorth_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML
import selenium
counter = 1
baseURL = 'http://hn2paw7zaahbikbejiv6h22zwtijlam65y2c77xj2ypbilm2xs4bnbid.onion'
@ -40,25 +43,24 @@ def startCrawling():
print(driver.current_url, e)
closeDriver(driver)
new_parse(marketName, False)
new_parse(marketName, baseURL, True)
# Login using premade account credentials and do login captcha manually
def login(driver):
time.sleep(3)
#wait for login page
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div/div[2]/div[2]/div/div[3]/form/div[1]/input")))
input("Press ENTER when CAPTCHA is completed\n")
#entering username and password into input boxes
usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div[2]/div/div[3]/form/div[1]/input')
usernameBox = driver.find_element(by=By.XPATH, value='//input[@name="login"]')
#Username here
usernameBox.send_keys('blabri')
passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div[2]/div/div[3]/form/div[2]/input')
passwordBox = driver.find_element(by=By.XPATH, value='//input[@name="pass"]')
#Password here
passwordBox.send_keys('fishowal')
'''
# wait for captcha page show up
WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
(By.XPATH, "/html/body/div/div[2]/div[2]/div/div[3]/form/div[3]/div/img")))
@ -83,27 +85,27 @@ def login(driver):
# click the verify(submit) button
driver.find_element(by=By.XPATH, value="/html/body/div/div[2]/div[2]/div/div[3]/form/div[5]/input").click()
'''
input("Press ENTER when CAPTCHA is completed\n")
# wait for the listing page to show up (this XPath may need to change for a different seed URL)
WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
(By.XPATH, '//*[@id="information"]')))
# Returns the name of the website
def getMarketName():
name = 'WeTheNorth'
return name
def getMKTName() -> str:
name = 'WeTheNorth'
return name
# Return the link of the website
def getFixedURL():
url = 'http://hn2paw7zaahbikbejiv6h22zwtijlam65y2c77xj2ypbilm2xs4bnbid.onion'
return url
# Closes Tor Browser
def closeDriver(driver):
# global pid
@ -123,13 +125,13 @@ def createFFDriver():
ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
# ff_prof.set_preference("places.history.enabled", False)
# ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
# ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
# ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
# ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
# ff_prof.set_preference("signon.rememberSignons", False)
# ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
ff_prof.set_preference("places.history.enabled", False)
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
ff_prof.set_preference("signon.rememberSignons", False)
ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
# ff_prof.set_preference("network.dns.disablePrefetch", True)
# ff_prof.set_preference("network.http.sendRefererHeader", 0)
ff_prof.set_preference("permissions.default.image", 3)
@ -206,7 +208,6 @@ def getInterestedLinks():
# Software and Malware
links.append('http://hn2paw7zaahbikbejiv6h22zwtijlam65y2c77xj2ypbilm2xs4bnbid.onion/items.php?category=10')
return links
@ -243,12 +244,6 @@ def crawlForum(driver):
savePage(driver, driver.page_source, item)
driver.back()
# comment out
break
# comment out
if count == 1:
break
try:
nav = driver.find_element(by=By.XPATH, value=
@ -293,4 +288,4 @@ def productPages(html):
def crawler():
startCrawling()
# print("Crawling and Parsing BestCardingWorld .... DONE!")
# print("Crawling and Parsing BestCardingWorld .... DONE!")

+ 6
- 5
MarketPlaces/WeTheNorth/parser.py View File

@ -47,11 +47,11 @@ def wethenorth_description_parser(soup):
vendor = vendor.strip()
# Finding Vendor Rating
# rating = listDes.find('span',{'class':'levelSet'})
# rating = rating.text
# rating = rating.replace('\n', ' ')
# rating = rating.replace(",", "")
# rating = rating.strip()
rating = listDes.find('span',{'class':'levelSet'})
rating = rating.text
rating = rating.replace('\n', ' ')
rating = rating.replace(",", "")
rating = rating.strip()
# Finding Successful Transactions
success = listDes.find_all('p')[1]
@ -92,6 +92,7 @@ def wethenorth_description_parser(soup):
describe = describe.replace("\r", " ")
describe = describe.strip()
# cannot find any tag for these
'''
# Finding the Number of Product Reviews
tag = soup.findAll(text=re.compile('Reviews'))


+ 4
- 4
setup.ini View File

@ -1,11 +1,11 @@
[TOR]
firefox_binary_path = C:\Users\calsyslab\Desktop\Tor Browser\Browser\firefox.exe
firefox_profile_path = C:\Users\calsyslab\Desktop\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default
geckodriver_path = C:\calsyslab\Project\dw_pipeline_test\selenium\geckodriver.exe
firefox_binary_path = C:\Users\minhkhoitran\Desktop\Tor Browser\Browser\firefox.exe
firefox_profile_path = C:\Users\minhkhoitran\Desktop\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default
geckodriver_path = C:\nsf-reu\dw_pipeline_test\selenium\geckodriver.exe
[Project]
project_directory = C:\calsyslab\Project\dw_pipeline_test
project_directory = C:\nsf-reu\dw_pipeline_test
shared_folder = \\VBoxSvr\Shared
[PostgreSQL]

