@@ -1,4 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="ProjectRootManager" version="2" project-jdk-name="C:\ProgramData\Anaconda3" project-jdk-type="Python SDK" /> | |||
<component name="ProjectRootManager" version="2" project-jdk-name="C:\Users\Helium\anaconda3" project-jdk-type="Python SDK" /> | |||
</project> |
@@ -0,0 +1,257 @@
__author__ = '91Shadows' | |||
''' | |||
CryptBB Crawler (Mechanize) | |||
''' | |||
import codecs, os, re | |||
import socks, socket, time | |||
from datetime import date | |||
import urllib.parse as urlparse | |||
import http.client as httplib | |||
import mechanize | |||
import subprocess | |||
from bs4 import BeautifulSoup | |||
from Forums.Initialization.prepare_parser import new_parse | |||
from Forums.BestCardingWorld.parser import bestcardingworld_links_parser | |||
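# NOTE: this crawler currently reuses the BestCardingWorld links parser and the
# BestCardingWorld HTML_Pages directories (see getFullPathName and findDescriptionPages below)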
counter = 1 | |||
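# Force HTTP/1.0 so httplib skips keep-alive and chunked responses, which some
# onion services served through the SOCKS proxy handle poorly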
httplib.HTTPConnection._http_vsn = 10 | |||
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0' | |||
baseURL = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5' | |||
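# Route every socket through Tor Browser's local SOCKS5 listener (default port 9150)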
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150) | |||
# Opens Tor Browser, crawls the website | |||
def startCrawling(): | |||
opentor() | |||
getUrl() | |||
forumName = getForumName() | |||
br = getAccess() | |||
if br != 'down': | |||
crawlForum(br) | |||
new_parse(forumName, False) | |||
# new_parse(forumName, False) | |||
closetor() | |||
# Opens Tor Browser | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
path = open('../../path.txt').readline() | |||
pro = subprocess.Popen(path) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input("Tor Connected. Press ENTER to continue\n") | |||
return | |||
# Creates a connection through Tor Port | |||
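# getUrl() monkey-patches the socket module so every connection opened by
# mechanize/httplib is tunneled through the SOCKS proxy configured above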
def getUrl(timeout=None): | |||
socket.socket = socks.socksocket | |||
socket.create_connection = create_connection | |||
return | |||
# Makes the onion address request | |||
def create_connection(address, timeout=None, source_address=None): | |||
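    # timeout and source_address are accepted only to match the socket API; the
    # SOCKS socket below connects straight to the onion address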
sock = socks.socksocket() | |||
sock.connect(address) | |||
return sock | |||
# Returns the name of the website
def getForumName(): | |||
name = 'CryptBB' | |||
return name | |||
# Returns the base link of the website
def getFixedURL(): | |||
url = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=42&sid=ee2cbfd73c12923d979790b2bb4bdfd5' | |||
return url | |||
# Closes Tor Browser | |||
def closetor(): | |||
global pid | |||
os.system("taskkill /pid " + str(pid)) | |||
print('Closing Tor...') | |||
time.sleep(3) | |||
return | |||
# Creates a Mechanize browser and initializes its options | |||
def createBrowser(): | |||
br = mechanize.Browser() | |||
cj = mechanize.CookieJar() | |||
br.set_cookiejar(cj) | |||
# Browser options | |||
br.set_handle_equiv(True) | |||
br.set_handle_redirect(True) | |||
br.set_handle_referer(True) | |||
br.set_handle_robots(False) | |||
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) | |||
br.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'), | |||
('Accept', '*/*')] | |||
return br | |||
def getAccess(): | |||
url = getFixedURL() | |||
br = createBrowser() | |||
try: | |||
br.open(url) | |||
return br | |||
except: | |||
return 'down' | |||
# Saves the crawled html page | |||
def savePage(page, url):
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, "wb") as file:
        file.write(page.read())
    return
# Gets the full path of the page to be saved along with its appropriate file name | |||
def getFullPathName(url): | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/BestCardingWorld/HTML_Pages/' + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + '/' + 'Description/' + fileName + '.html' | |||
else: | |||
fullPath = 'C:/Users/CALSysLab/Documents/threatIntelligence-main/DarkWebMining_Working/Forums/BestCardingWorld/HTML_Pages/' + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + '/' + 'Listing/' + fileName + '.html' | |||
return fullPath | |||
# Creates the name of the file based on URL | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
# Hacking and Markets related topics | |||
def getInterestedLinks(): | |||
links = [] | |||
links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=43&sid=e12864ffccc5df877b03b573534955be') | |||
return links | |||
# Start crawling Forum pages | |||
def crawlForum(br): | |||
print("Crawling CryptBB forum") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
page = br.open(link) | |||
savePage(page, link) | |||
res = br.response().read() | |||
soup = BeautifulSoup(res, 'html.parser') | |||
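            # the listing's next page is exposed as an <a rel="next"> element;
            # it is queued right after the current link so pages are visited in order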
next_link = soup.find("a", {"rel": "next"}) | |||
if next_link != None: | |||
full_url = urlparse.urljoin(linksToCrawl[i], next_link['href']) | |||
linksToCrawl.insert(i + 1, full_url) | |||
listOfTopics = findDescriptionPages(link) | |||
for topic in listOfTopics: | |||
itemPage = br.open(str(topic)) | |||
savePage(itemPage, topic) | |||
except Exception as e: | |||
print('Error getting link: ', link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("CryptBB forum done sucessfully. Press ENTER to continue\n") | |||
return | |||
# Returns True if the link is a 'Topic' (description) link; may need to change for different websites
def isDescriptionLink(url): | |||
if 'topic' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listing-page link; may need to change for different websites
def isListingLink(url): | |||
''' | |||
reg = 'board=[0-9]+.[0-9]+\Z' | |||
if len(re.findall(reg, url)) == 0: | |||
return False | |||
return True | |||
''' | |||
if 'forum' in url: | |||
return True | |||
return False | |||
# Opens the saved listing page and calls the links parser to collect its description-page links
def findDescriptionPages(url): | |||
soup = "" | |||
error = False | |||
try: | |||
html = codecs.open( | |||
"C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\BestCardingWorld\\HTML_Pages\\" + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html", encoding='utf8') | |||
soup = BeautifulSoup(html, "html.parser") | |||
except: | |||
try: | |||
html = open( | |||
"C:\\Users\\CALSysLab\\Documents\\threatIntelligence-main\\DarkWebMining_Working\\Forums\\BestCardingWorld\\HTML_Pages\\" + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + "\\Listing\\" + getNameFromURL(url) + ".html") | |||
soup = BeautifulSoup(html, "html.parser") | |||
except: | |||
error = True | |||
print("There was a problem to read the file " + getNameFromURL(url) + " in the listing section.") | |||
if not error: | |||
return bestcardingworld_links_parser(soup) | |||
else: | |||
return [] | |||
def crawler(): | |||
startCrawling() | |||
print("Crawling and Parsing CryptBB .... DONE!") |
@@ -1 +1 @@
Tor2door | |||
ThiefWorld |
@@ -0,0 +1,312 @@
__author__ = 'Helium' | |||
''' | |||
ThiefWorld Forum Crawler (Selenium) | |||
''' | |||
from selenium import webdriver | |||
from selenium.common.exceptions import NoSuchElementException | |||
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile | |||
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary | |||
from selenium.webdriver.firefox.service import Service | |||
from selenium.webdriver.support.ui import WebDriverWait | |||
from selenium.webdriver.support import expected_conditions as EC | |||
from selenium.webdriver.common.by import By | |||
from PIL import Image | |||
import urllib.parse as urlparse | |||
import os, time | |||
from datetime import date | |||
import subprocess | |||
from bs4 import BeautifulSoup | |||
from MarketPlaces.Initialization.prepare_parser import new_parse | |||
from MarketPlaces.ThiefWorld.parser import thiefworld_links_parser | |||
from MarketPlaces.Utilities.utilities import cleanHTML | |||
counter = 1 | |||
baseURL = 'http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/' | |||
# Opens Tor Browser, crawls the website, then parses the results and closes Tor
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling(): | |||
opentor() | |||
mktName = getMKTName() | |||
driver = getAccess() | |||
if driver != 'down': | |||
try: | |||
login(driver) | |||
crawlForum(driver) | |||
except Exception as e: | |||
print(driver.current_url, e) | |||
closetor(driver) | |||
#new_parse(mktName, False) | |||
# Opens Tor Browser | |||
#prompts for ENTER input to continue | |||
def opentor(): | |||
global pid | |||
print("Connecting Tor...") | |||
path = open('../../path.txt').readline().strip() | |||
pro = subprocess.Popen(path) | |||
pid = pro.pid | |||
time.sleep(7.5) | |||
input('Tor Connected. Press ENTER to continue\n') | |||
return | |||
# Returns the name of the website | |||
#return: name of site in string type | |||
def getMKTName(): | |||
    name = 'ThiefWorld'
return name | |||
# Return the base link of the website | |||
#return: url of base site in string type | |||
def getFixedURL(): | |||
url = 'http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/' | |||
return url | |||
# Closes Tor Browser | |||
#@param: current selenium driver | |||
def closetor(driver): | |||
# global pid | |||
# os.system("taskkill /pid " + str(pro.pid)) | |||
# os.system("taskkill /t /f /im tor.exe") | |||
print('Closing Tor...') | |||
driver.close() | |||
time.sleep(3) | |||
return | |||
# Creates FireFox 'driver' and configure its 'Profile' | |||
# to use Tor proxy and socket | |||
def createFFDriver(): | |||
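    # path.txt is expected to hold three lines: the Tor Browser firefox binary,
    # the browser profile directory, and the geckodriver executable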
file = open('../../path.txt', 'r') | |||
lines = file.readlines() | |||
ff_binary = FirefoxBinary(lines[0].strip()) | |||
ff_prof = FirefoxProfile(lines[1].strip()) | |||
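    # Harden the profile: no history, clear site data on shutdown, block images and
    # JavaScript, and send all traffic (including DNS) through Tor's SOCKS proxy on 127.0.0.1:9150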
ff_prof.set_preference("places.history.enabled", False) | |||
ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.passwords", True) | |||
ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True) | |||
ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True) | |||
ff_prof.set_preference("signon.rememberSignons", False) | |||
ff_prof.set_preference("network.cookie.lifetimePolicy", 2) | |||
ff_prof.set_preference("network.dns.disablePrefetch", True) | |||
ff_prof.set_preference("network.http.sendRefererHeader", 0) | |||
ff_prof.set_preference("permissions.default.image", 2) | |||
ff_prof.set_preference("browser.download.folderList", 2) | |||
ff_prof.set_preference("browser.download.manager.showWhenStarting", False) | |||
ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain") | |||
ff_prof.set_preference('network.proxy.type', 1) | |||
ff_prof.set_preference("network.proxy.socks_version", 5) | |||
ff_prof.set_preference('network.proxy.socks', '127.0.0.1') | |||
ff_prof.set_preference('network.proxy.socks_port', 9150) | |||
ff_prof.set_preference('network.proxy.socks_remote_dns', True) | |||
ff_prof.set_preference("javascript.enabled", False) | |||
ff_prof.update_preferences() | |||
service = Service(lines[2].strip()) | |||
driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service) | |||
return driver | |||
#the driver 'gets' the url and attempts to access the site; if it can't, 'down' is returned
#return: return the selenium driver or string 'down' | |||
def getAccess(): | |||
url = getFixedURL() | |||
driver = createFFDriver() | |||
try: | |||
driver.get(url) | |||
return driver | |||
except: | |||
driver.close() | |||
return 'down' | |||
# No login or captcha is handled here; the function waits for the landing page to load,
# then follows the first navigation link into the market listing
#@param: current selenium web driver | |||
def login(driver): | |||
# wait for page to show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.XPATH, "/html/body/div/div[1]/div/div[1]/div[1]/ul"))) | |||
temp = driver.find_element(By.XPATH, '/html/body/div/header/div[2]/div/nav/div[2]/a[1]').get_attribute( | |||
'href') # /html/body/div/div[2]/div/div[2]/div | |||
link = urlparse.urljoin(baseURL, str(temp)) | |||
driver.get(link) # open | |||
# wait for listing page show up (This Xpath may need to change based on different seed url) | |||
WebDriverWait(driver, 100).until(EC.visibility_of_element_located( | |||
(By.ID, "side-bar"))) | |||
# Saves the crawled html page, makes the directory path for html pages if not made | |||
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name | |||
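# pages are stored under an MMDDYYYY folder for the current crawl date, split into Description and Listing subfolders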
#@param: raw url as crawler crawls through every site | |||
def getFullPathName(url): | |||
fileName = getNameFromURL(url) | |||
if isDescriptionLink(url): | |||
fullPath = r'..\ThiefWorld\HTML_Pages\\' + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html' | |||
else: | |||
fullPath = r'..\ThiefWorld\HTML_Pages\\' + str( | |||
"%02d" % date.today().month) + str("%02d" % date.today().day) + str( | |||
"%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html' | |||
return fullPath | |||
# Creates the file name from the passed URL; falls back to a counter-based name if nothing alphanumeric is left after cleaning
#@param: raw url as crawler crawls through every site | |||
def getNameFromURL(url): | |||
global counter | |||
name = ''.join(e for e in url if e.isalnum()) | |||
if (name == ''): | |||
name = str(counter) | |||
counter = counter + 1 | |||
return name | |||
# Returns the list of category URLs of interest; the crawler runs through this list
# For ThiefWorld these are product categories such as Hacking and DDOS, Carding Manuals,
# Software, and Databases
def getInterestedLinks(): | |||
links = [] | |||
# Hacking and DDOS | |||
links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/35') | |||
# # Carding Manuals | |||
links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/20') | |||
# # Software | |||
links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/37') | |||
# #Database | |||
links.append('http://qsw7iurcrdwyml5kg4oxbmtqrcnpxiag3iumdarefzeunnyc2dnyljad.onion/catalog/38') | |||
return links | |||
# Iterates through the links of interest; each listing page is opened and saved,
# and every product (description) page found on it is opened and saved as well
#@param: selenium driver | |||
def crawlForum(driver): | |||
print("Crawling the ThiefWorld market") | |||
linksToCrawl = getInterestedLinks() | |||
visited = set(linksToCrawl) | |||
initialTime = time.time() | |||
count = 0 | |||
i = 0 | |||
while i < len(linksToCrawl): | |||
link = linksToCrawl[i] | |||
print('Crawling :', link) | |||
try: | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
has_next_page = True | |||
while has_next_page: | |||
                productList = productPages(html)
                for item in productList:
itemURL = urlparse.urljoin(baseURL, str(item)) | |||
try: | |||
driver.get(itemURL) | |||
except: | |||
driver.refresh() | |||
savePage(driver.page_source, item) | |||
driver.back() | |||
# # comment out | |||
# break | |||
# | |||
# # # comment out | |||
# if count == 1: | |||
# count = 0 | |||
# break | |||
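                # the "next page" arrow is assumed to be the 13th <li> of the pager;
                # this XPath may need adjusting if the category layout changes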
try: | |||
link = driver.find_element(by=By.XPATH, value= | |||
'/html/body/div/div[1]/div/div/div[2]/div[3]/div/ul/li[13]/a').get_attribute('href') | |||
if link == "": | |||
raise NoSuchElementException | |||
try: | |||
driver.get(link) | |||
except: | |||
driver.refresh() | |||
html = driver.page_source | |||
savePage(html, link) | |||
count += 1 | |||
except NoSuchElementException: | |||
has_next_page = False | |||
except Exception as e: | |||
print(link, e) | |||
i += 1 | |||
# finalTime = time.time() | |||
# print finalTime - initialTime | |||
input("Crawling ThiefWorld forum done sucessfully. Press ENTER to continue\n") | |||
# Returns 'True' if the link is a description link | |||
#@param: url of any url crawled | |||
#return: true if is a description page, false if not | |||
def isDescriptionLink(url): | |||
if 'product' in url: | |||
return True | |||
return False | |||
# Returns True if the link is a listingPage link | |||
#@param: url of any url crawled | |||
#return: true if is a Listing page, false if not | |||
def isListingLink(url): | |||
if 'catalog' in url: | |||
return True | |||
return False | |||
# Calls the links parser on a listing page to collect the description links that should be crawled
#@param: html, the page source of a listing page reached via getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html): | |||
soup = BeautifulSoup(html, "html.parser") | |||
return thiefworld_links_parser(soup) | |||
# Drop links that "signout" | |||
# def isSignOut(url): | |||
# #absURL = urlparse.urljoin(url.base_url, url.url) | |||
# if 'signout' in url.lower() or 'logout' in url.lower(): | |||
# return True | |||
# | |||
# return False | |||
def crawler(): | |||
startCrawling() | |||
# print("Crawling and Parsing BestCardingWorld .... DONE!") |
@@ -0,0 +1,291 @@
__author__ = 'DarkWeb' | |||
# Here, we are importing the auxiliary functions to clean or convert data | |||
from MarketPlaces.Utilities.utilities import * | |||
# Here, we are importing BeautifulSoup to search through the HTML tree | |||
from bs4 import BeautifulSoup | |||
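# The darkfox_* functions below appear to be carried over as a template from the DarkFox
# market; only thiefworld_links_parser is imported by the ThiefWorld crawler at this point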
#parses description pages: takes the soup object of a description page and extracts the info it needs
#the extracted values are collected and returned together in a fixed order
#@param: soup object looking at html page of description page
#return: 'row' tuple that holds every field scraped from the description page
def darkfox_description_parser(soup): | |||
# Fields to be parsed | |||
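    # every field defaults to "-1", which is used as the "not found" value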
name = "-1" # 0 Product_Name | |||
describe = "-1" # 1 Product_Description | |||
lastSeen = "-1" # 2 Product_LastViewDate | |||
rules = "-1" # 3 NOT USED ... | |||
CVE = "-1" # 4 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = "-1" # 5 Product_MS_Classification (Microsoft Security) | |||
review = "-1" # 6 Product_Number_Of_Reviews | |||
category = "-1" # 7 Product_Category | |||
shipFrom = "-1" # 8 Product_ShippedFrom | |||
shipTo = "-1" # 9 Product_ShippedTo | |||
left = "-1" # 10 Product_QuantityLeft | |||
escrow = "-1" # 11 Vendor_Warranty | |||
terms = "-1" # 12 Vendor_TermsAndConditions | |||
vendor = "-1" # 13 Vendor_Name | |||
sold = "-1" # 14 Product_QuantitySold | |||
addDate = "-1" # 15 Product_AddedDate | |||
available = "-1" # 16 NOT USED ... | |||
endDate = "-1" # 17 NOT USED ... | |||
BTC = "-1" # 18 Product_BTC_SellingPrice | |||
USD = "-1" # 19 Product_USD_SellingPrice | |||
rating = "-1" # 20 Vendor_Rating | |||
success = "-1" # 21 Vendor_Successful_Transactions | |||
EURO = "-1" # 22 Product_EURO_SellingPrice | |||
# Finding Product Name | |||
name = soup.find('h1').text | |||
name = name.replace('\n', ' ') | |||
name = name.replace(",", "") | |||
name = name.strip() | |||
# Finding Vendor | |||
vendor = soup.find('h3').find('a').text.strip() | |||
# Finding Vendor Rating | |||
rating = soup.find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Successful Transactions | |||
success = soup.find('h3').text | |||
success = success.replace("Vendor: ", "") | |||
success = success.replace(vendor, "") | |||
success = success.replace("(", "") | |||
success = success.replace(")", "") | |||
success = success.strip() | |||
bae = soup.find('div', {'class': "box"}).find_all('ul') | |||
# Finding Prices | |||
USD = bae[1].find('strong').text.strip() | |||
li = bae[2].find_all('li') | |||
# Finding Escrow | |||
escrow = li[0].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Category | |||
category = li[1].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding the Product Quantity Available | |||
left = li[3].find('span', {'class': "tag is-dark"}).text.strip() | |||
# Finding Number Sold | |||
sold = li[4].find('span', {'class': "tag is-dark"}).text.strip() | |||
li = bae[3].find_all('li') | |||
# Finding Shipment Information (Origin) | |||
if "Ships from:" in li[-2].text: | |||
shipFrom = li[-2].text | |||
shipFrom = shipFrom.replace("Ships from: ", "") | |||
# shipFrom = shipFrom.replace(",", "") | |||
shipFrom = shipFrom.strip() | |||
# Finding Shipment Information (Destination) | |||
shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text | |||
shipTo = shipTo.replace("Ships to: ", "") | |||
shipTo = shipTo.strip() | |||
if "certain countries" in shipTo: | |||
countries = "" | |||
tags = li[-1].find_all('span', {'class': "tag"}) | |||
for tag in tags: | |||
country = tag.text.strip() | |||
countries += country + ", " | |||
shipTo = countries.strip(", ") | |||
# Finding the Product description | |||
describe = soup.find('div', {'class': "pre-line"}).text | |||
describe = describe.replace("\n", " ") | |||
describe = describe.strip() | |||
'''# Finding the Number of Product Reviews | |||
tag = soup.findAll(text=re.compile('Reviews')) | |||
for index in tag: | |||
reviews = index | |||
par = reviews.find('(') | |||
if par >=0: | |||
reviews = reviews.replace("Reviews (","") | |||
reviews = reviews.replace(")","") | |||
reviews = reviews.split(",") | |||
review = str(abs(int(reviews[0])) + abs(int(reviews[1]))) | |||
else : | |||
review = "-1"''' | |||
# Searching for CVE and MS categories | |||
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if cve: | |||
CVE = " " | |||
for idx in cve: | |||
CVE += (idx) | |||
CVE += " " | |||
CVE = CVE.replace(',', ' ') | |||
CVE = CVE.replace('\n', '') | |||
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if ms: | |||
MS = " " | |||
for im in ms: | |||
MS += (im) | |||
MS += " " | |||
MS = MS.replace(',', ' ') | |||
MS = MS.replace('\n', '') | |||
# Populating the final variable (this should be a list with all fields scraped) | |||
row = (name, describe, lastSeen, rules, CVE, MS, review, category, shipFrom, shipTo, left, escrow, terms, vendor, | |||
sold, addDate, available, endDate, BTC, USD, rating, success, EURO) | |||
# Sending the results | |||
return row | |||
#parses listing pages: takes the soup object of a listing page and extracts the info it needs
#values are accumulated into per-field lists, one entry per product listed on the page
#@param: soup object looking at html page of listing page
#return: 'row' produced by organizeProducts, holding the per-field lists
def darkfox_listing_parser(soup): | |||
# Fields to be parsed | |||
nm = 0 # Total_Products (Should be Integer) | |||
mktName = "DarkFox" # 0 Marketplace_Name | |||
name = [] # 1 Product_Name | |||
CVE = [] # 2 Product_CVE_Classification (Common Vulnerabilities and Exposures) | |||
MS = [] # 3 Product_MS_Classification (Microsoft Security) | |||
category = [] # 4 Product_Category | |||
describe = [] # 5 Product_Description | |||
escrow = [] # 6 Vendor_Warranty | |||
views = [] # 7 Product_Number_Of_Views | |||
reviews = [] # 8 Product_Number_Of_Reviews | |||
addDate = [] # 9 Product_AddDate | |||
lastSeen = [] # 10 Product_LastViewDate | |||
BTC = [] # 11 Product_BTC_SellingPrice | |||
USD = [] # 12 Product_USD_SellingPrice | |||
EURO = [] # 13 Product_EURO_SellingPrice | |||
sold = [] # 14 Product_QuantitySold | |||
qLeft =[] # 15 Product_QuantityLeft | |||
shipFrom = [] # 16 Product_ShippedFrom | |||
shipTo = [] # 17 Product_ShippedTo | |||
vendor = [] # 18 Vendor | |||
rating = [] # 19 Vendor_Rating | |||
success = [] # 20 Vendor_Successful_Transactions | |||
href = [] # 23 Product_Links (Urls) | |||
listing = soup.findAll('div', {"class": "card"}) | |||
# Populating the Number of Products | |||
nm = len(listing) | |||
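    # one value per product card is appended to each field list below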
for a in listing: | |||
bae = a.findAll('a', href=True) | |||
# Adding the url to the list of urls | |||
link = bae[0].get('href') | |||
link = cleanLink(link) | |||
href.append(link) | |||
# Finding the Product | |||
product = bae[1].find('p').text | |||
product = product.replace('\n', ' ') | |||
product = product.replace(",", "") | |||
product = product.replace("...", "") | |||
product = product.strip() | |||
name.append(product) | |||
bae = a.find('div', {'class': "media-content"}).find('div').find_all('div') | |||
if len(bae) >= 5: | |||
# Finding Prices | |||
price = bae[0].text | |||
ud = price.replace(" USD", " ") | |||
# u = ud.replace("$","") | |||
u = ud.replace(",", "") | |||
u = u.strip() | |||
USD.append(u) | |||
# bc = (prc[1]).strip(' BTC') | |||
# BTC.append(bc) | |||
# Finding the Vendor | |||
vendor_name = bae[1].find('a').text | |||
vendor_name = vendor_name.replace(",", "") | |||
vendor_name = vendor_name.strip() | |||
vendor.append(vendor_name) | |||
# Finding the Category | |||
cat = bae[2].find('small').text | |||
cat = cat.replace("Category: ", "") | |||
cat = cat.replace(",", "") | |||
cat = cat.strip() | |||
category.append(cat) | |||
# Finding Number Sold and Quantity Left | |||
num = bae[3].text | |||
num = num.replace("Sold: ", "") | |||
num = num.strip() | |||
sold.append(num) | |||
quant = bae[4].find('small').text | |||
quant = quant.replace("In stock: ", "") | |||
quant = quant.strip() | |||
qLeft.append(quant) | |||
# Finding Successful Transactions | |||
freq = bae[1].text | |||
freq = freq.replace(vendor_name, "") | |||
freq = re.sub(r'Vendor Level \d+', "", freq) | |||
freq = freq.replace("(", "") | |||
freq = freq.replace(")", "") | |||
freq = freq.strip() | |||
success.append(freq) | |||
# Searching for CVE and MS categories | |||
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
if not cve: | |||
cveValue="-1" | |||
else: | |||
cee = " " | |||
for idx in cve: | |||
cee += (idx) | |||
cee += " " | |||
cee = cee.replace(',', ' ') | |||
cee = cee.replace('\n', '') | |||
cveValue=cee | |||
CVE.append(cveValue) | |||
        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
if not ms: | |||
MSValue="-1" | |||
else: | |||
me = " " | |||
for im in ms: | |||
me += (im) | |||
me += " " | |||
me = me.replace(',', ' ') | |||
me = me.replace('\n', '') | |||
MSValue=me | |||
MS.append(MSValue) | |||
# Populate the final variable (this should be a list with all fields scraped) | |||
return organizeProducts(mktName, nm, name, CVE, MS, category, describe, escrow, views, reviews, addDate, lastSeen, | |||
BTC, USD, EURO, qLeft, shipFrom, shipTo, vendor, rating, success, sold, href) | |||
#called by the crawler to get description links on a listing page | |||
#@param: beautifulsoup object that is using the correct html page (listing page) | |||
#return: list of description links from a listing page | |||
def thiefworld_links_parser(soup): | |||
# Returning all links that should be visited by the Crawler | |||
href = [] | |||
listing = soup.find('div', {"class": "row tile__list tileitems_filter pad15 tileproduct__list"}).findAll('div', {"class": "desc"}) | |||
for a in listing: | |||
bae = a.find('div', {"class": "title"}).find('a', href=True) | |||
link = bae['href'] | |||
href.append(link) | |||
return href |