
Added Robinhood Market crawler and parser

Branch: main
chris committed 1 year ago · commit beb4745093
4 changed files with 595 additions and 0 deletions:

  1. MarketPlaces/Initialization/markets_mining.py (+3 −0)
  2. MarketPlaces/Initialization/prepare_parser.py (+5 −0)
  3. MarketPlaces/RobinhoodMarket/crawler_selenium.py (+277 −0)
  4. MarketPlaces/RobinhoodMarket/parser.py (+310 −0)

MarketPlaces/Initialization/markets_mining.py (+3 −0)

@@ -22,6 +22,7 @@ from MarketPlaces.DarkMatter.crawler_selenium import crawler as crawlerDarkMatter
 from MarketPlaces.M00nkeyMarket.crawler_selenium import crawler as crawlerM00nkeyMarket
 from MarketPlaces.ViceCity.crawler_selenium import crawler as crawlerViceCity
 from MarketPlaces.HiddenMarket.crawler_selenium import crawler as crawlerHiddenMarket
+from MarketPlaces.RobinhoodMarket.crawler_selenium import crawler as crawlerRobinhoodMarket

 import configparser
 import time
@@ -112,6 +113,8 @@ if __name__ == '__main__':
         crawlerViceCity()
     elif mkt == "HiddenMarket":
         crawlerHiddenMarket()
+    elif mkt == "RobinhoodMarket":
+        crawlerRobinhoodMarket()

MarketPlaces/Initialization/prepare_parser.py (+5 −0)

@@ -18,6 +18,7 @@ from MarketPlaces.DigitalThriftShop.parser import *
 from MarketPlaces.LionMarketplace.parser import *
 from MarketPlaces.TorMarket.parser import *
 from MarketPlaces.HiddenMarket.parser import *
+from MarketPlaces.RobinhoodMarket.parser import *

 from MarketPlaces.Classifier.classify_product import predict
@@ -172,6 +173,8 @@ def new_parse(marketPlace, url, createLog):
             rmm = lionmarketplace_description_parser(soup)
         elif marketPlace == "TorMarket":
             rmm = tormarket_description_parser(soup)
+        elif marketPlace == "RobinhoodMarket":
+            rmm = Robinhood_description_parser(soup)

         # key = u"Pr:" + rmm[0].upper()[:desc_lim1] + u" Vendor:" + rmm[13].upper()[:desc_lim2]
         key = u"Url:" + os.path.basename(line2).replace(".html", "")
@@ -241,6 +244,8 @@ def new_parse(marketPlace, url, createLog):
             rw = lionmarketplace_listing_parser(soup)
         elif marketPlace == "TorMarket":
             rw = tormarket_listing_parser(soup)
+        elif marketPlace == "RobinhoodMarket":
+            rw = Robinhood_listing_parser(soup)
         else:
             parseError = True


MarketPlaces/RobinhoodMarket/crawler_selenium.py (+277 −0)

@@ -0,0 +1,277 @@
__author__ = 'chris'

'''
Robinhood Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.RobinhoodMarket.parser import Robinhood_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1
baseURL = 'http://ilr3qzubfnx33vbhal7l5coo4ftqlkv2tboph4ujog5crz6m5ua2b2ad.onion/'
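
The config.get('TOR', ...) calls in this file assume a [TOR] section in setup.ini with three path keys. A minimal sketch of the expected section (the paths are illustrative placeholders, not from this commit):

[TOR]
firefox_binary_path = C:\path\to\Tor Browser\Browser\firefox.exe
firefox_profile_path = C:\path\to\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default
geckodriver_path = C:\path\to\geckodriver.exe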
# Opens Tor Browser, crawls the website
def startCrawling():
    # Opening tor beforehand gives a "Tor exited during startup" error
    # opentor()

    marketName = getMarketName()
    driver = getAccess()

    # Captcha
    input("Press ENTER when website has loaded")

    if driver != 'down':
        try:
            # Robinhood doesn't need login
            # login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(marketName, baseURL, False)


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Login is not needed in Robinhood
def login(driver):
    pass


# Returns the name of the website
def getMarketName():
    name = 'RobinhoodMarket'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://ilr3qzubfnx33vbhal7l5coo4ftqlkv2tboph4ujog5crz6m5ua2b2ad.onion/'
    return url


# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.quit()
    time.sleep(3)
    return
# Creates a Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))
    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    return driver
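
The proxy preferences above only protect the crawl if the SOCKS proxy is actually in use, so a cheap sanity check before a long run is to load the Tor Project's check page with the freshly built driver. A minimal sketch (the helper name is illustrative, not part of this commit):

def verifyTorConnection(driver):
    # check.torproject.org reports whether the request arrived via Tor;
    # its success page contains the word 'Congratulations'.
    driver.get('https://check.torproject.org/')
    return 'Congratulations' in driver.page_source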
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    input('Tor Connected. Press ENTER to continue\n')
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import CURRENT_DATE

    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = r'..\RobinhoodMarket\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html'
    else:
        fullPath = r'..\RobinhoodMarket\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html'
    return fullPath
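
The raw-string paths above are Windows-only and mix single and doubled backslashes (Windows tolerates the doubled separators, but the layout is not portable). An equivalent, OS-portable sketch using os.path.join (illustrative, not part of this commit):

def getFullPathNamePortable(url):
    from MarketPlaces.Initialization.markets_mining import CURRENT_DATE
    # Same directory layout as getFullPathName, built with portable separators.
    subdir = 'Description' if isDescriptionLink(url) else 'Listing'
    return os.path.join('..', 'RobinhoodMarket', 'HTML_Pages',
                        CURRENT_DATE, subdir, getNameFromURL(url) + '.html')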
# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
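
getNameFromURL keeps only alphanumeric characters, so a URL collapses into a single token; the global counter is used only when nothing survives the filter. For example:

print(getNameFromURL(baseURL))
# -> 'httpilr3qzubfnx33vbhal7l5coo4ftqlkv2tboph4ujog5crz6m5ua2b2adonion'
#    (colons, slashes and dots stripped)
print(getNameFromURL('///'))   # -> '1' (falls back to the counter)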
def getInterestedLinks():
    links = []

    # Hacking
    links.append('http://ilr3qzubfnx33vbhal7l5coo4ftqlkv2tboph4ujog5crz6m5ua2b2ad.onion/product-category/hacking/')
    # Other Software
    links.append('http://ilr3qzubfnx33vbhal7l5coo4ftqlkv2tboph4ujog5crz6m5ua2b2ad.onion/product-category/other-software/')

    return links
def crawlForum(driver):
    print("Crawling the Robinhood market")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                    # comment out
                    # break

                # comment out
                # if count == 1:
                #     count = 0
                #     break

                # go to next page of market
                try:
                    nav = driver.find_element(by=By.XPATH, value="//a[@class='next page-numbers']")
                    link = nav.get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling Robinhood market done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a description (product) link
def isDescriptionLink(url):
    if 'product' in url and 'category' not in url:
        return True
    return False


# Returns True if the link is a listing page link
def isListingLink(url):
    if 'category=' in url:
        return True
    return False
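
A quick illustration of how the two predicates classify this market's URLs. Note that the category pages from getInterestedLinks contain no 'category=' query string, so isListingLink returns False for them; only isDescriptionLink is actually consulted by getFullPathName, which routes everything else to the Listing folder:

print(isDescriptionLink(baseURL + 'product/some-item/'))         # True ('some-item' is an illustrative slug)
print(isDescriptionLink(baseURL + 'product-category/hacking/'))  # False: 'category' appears in the URL
print(isListingLink(baseURL + 'product-category/hacking/'))      # False: no 'category=' substring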
# calling the parser to define the links
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return Robinhood_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing RobinhoodMarket .... DONE!")


if __name__ == '__main__':
    startCrawling()

MarketPlaces/RobinhoodMarket/parser.py (+310 −0)

@@ -0,0 +1,310 @@
__author__ = 'chris'

import re
import traceback

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup

# Imports for the test run
import glob
import os
import codecs
import shutil
# This is the method to parse the Description Pages (one page for each Product in the Listing Pages)
def Robinhood_description_parser(soup):

    # Fields to be parsed
    vendor = "-1"           # 0 *Vendor_Name
    success = "-1"          # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"    # 2 Vendor_Rating
    name = "-1"             # 3 *Product_Name
    describe = "-1"         # 4 Product_Description
    CVE = "-1"              # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"               # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"         # 7 Product_Category
    views = "-1"            # 8 Product_Number_Of_Views
    reviews = "-1"          # 9 Product_Number_Of_Reviews
    rating_item = "-1"      # 10 Product_Rating
    addDate = "-1"          # 11 Product_AddedDate
    BTC = "-1"              # 12 Product_BTC_SellingPrice
    USD = "-1"              # 13 Product_USD_SellingPrice
    EURO = "-1"             # 14 Product_EURO_SellingPrice
    sold = "-1"             # 15 Product_QuantitySold
    left = "-1"             # 16 Product_QuantityLeft
    shipFrom = "-1"         # 17 Product_ShippedFrom
    shipTo = "-1"           # 18 Product_ShippedTo

    # Finding Product Name
    name = soup.find('h1').text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    # Finding description
    desc = ''
    primary = soup.find('div', {'id': 'primary'})
    product = primary.findAll('div')[1]
    commerce = product.findAll('div', recursive=False)[2]
    descDiv = commerce.findAll('div')[0]
    # descDiv = soup.find('div', {'class': 'woocommerce-Tabs-panel woocommerce-Tabs-panel--description panel entry-content wc-tab'})

    descText = descDiv.findAll('p')
    for para in descText:
        desc = desc + para.text
    describe = desc

    # Finding Vendor
    vendor = soup.find('a', {'class': 'wcfm_dashboard_item_title'}).text
    vendor = vendor.replace(",", "")
    vendor = vendor.replace("Sold by:", "")
    vendor = vendor.strip()

    # Finding Category
    catSpan = soup.find('span', {'class': 'posted_in'})
    category = catSpan.find('a').text

    # Finding USD
    priceText = soup.find('p', {'class': 'price'}).text
    USD = str(priceText).strip()

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row
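
For reference, the two regular expressions above match identifiers such as CVE-2023-1234 and MS17-010. A standalone check (illustrative):

import re
text = 'Exploit for CVE-2023-1234, patched by MS17-010'
print(re.findall(r'CVE-\d{4}-\d{4}', text))  # ['CVE-2023-1234']
print(re.findall(r'MS\d{2}-\d{3}', text))    # ['MS17-010']
# Note: CVE suffixes can run to five or more digits (e.g. CVE-2021-44228);
# this pattern captures only the first four digits of such IDs.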
# This is the method to parse the Listing Pages
def Robinhood_listing_parser(soup):

    # Fields to be parsed
    nm = 0                              # *Total_Products (Should be Integer)
    mktName = "Robinhood Market"        # 0 *Marketplace_Name
    vendor = []                         # 1 *Vendor y
    rating_vendor = []                  # 2 Vendor_Rating
    success = []                        # 3 Vendor_Successful_Transactions
    name = []                           # 4 *Product_Name y
    CVE = []                            # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                             # 6 Product_MS_Classification (Microsoft Security)
    category = []                       # 7 Product_Category y
    describe = []                       # 8 Product_Description
    views = []                          # 9 Product_Number_Of_Views
    reviews = []                        # 10 Product_Number_Of_Reviews
    rating_item = []                    # 11 Product_Rating
    addDate = []                        # 12 Product_AddDate
    BTC = []                            # 13 Product_BTC_SellingPrice
    USD = []                            # 14 Product_USD_SellingPrice y
    EURO = []                           # 15 Product_EURO_SellingPrice
    sold = []                           # 16 Product_QuantitySold
    qLeft = []                          # 17 Product_QuantityLeft
    shipFrom = []                       # 18 Product_ShippedFrom
    shipTo = []                         # 19 Product_ShippedTo
    href = []                           # 20 Product_Links

    listing = soup.find('ul', {"class": "products columns-4"})
    items = listing.findAll('li')

    # Populating the Number of Products
    nm = len(items)

    for card in items:
        # Finding Category
        cat = soup.find("h1").text
        cat = cat.replace('\n', ' ')
        cat = cat.replace(",", "")
        cat = cat.strip()
        category.append(cat)

        bae = card.findAll('a')

        # Adding the url to the list of urls
        link = card.find('a').get('href')
        href.append(link)

        # Finding Product Name
        product = card.find("h2").text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.strip()
        name.append(product)

        info = card.find('div', {'class': 'wcfmmp_sold_by_container'})

        # Finding Vendor
        vendor_name = info.find('a', {'class': 'wcfm_dashboard_item_title'}).text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding USD
        span = card.find('span', {'class': 'price'})
        if span is not None:
            bdi = span.find('bdi')
            usdText = bdi.find('span').next_sibling
            usdVal = usdText.text
        else:
            usdVal = "0"
        USD.append(usdVal)

        # Searching for CVE and MS categories
        cve = card.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = card.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # print(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
    #       reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
def Robinhood_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []
    # listing = soup.findAll('div', {"class": "woocommerce columns-4"})
    listing = soup.find('ul', {"class": "products columns-4"}).findAll('li')

    for item in listing:
        link = item.find('a')['href']
        href.append(link)

    return href
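
productPages() in crawler_selenium.py is a thin wrapper around this function, and the same call works offline against any saved listing page. A usage sketch (the file name is a placeholder):

import codecs
from bs4 import BeautifulSoup

html = codecs.open('listing.html', encoding='utf8')  # placeholder path to a saved listing page
soup = BeautifulSoup(html, 'html.parser')
html.close()
for link in Robinhood_links_parser(soup):
    print(link)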
if __name__ == '__main__':
    nError = 0
    marketPlace = 'RobinhoodMarket'

    lines = []  # listing pages
    lns = []    # description pages
    detPage = {}

    '''
    # reading description pages
    count = 0
    for fileDescription in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\08082023\\Description", '*.html')):
        count += 1
        lns.append(fileDescription)
        # if count > 5:
        #     break

    for index, line2 in enumerate(lns):
        print("Reading description folder of '" + marketPlace + "', file '" + os.path.basename(line2) + "', index= " + str(index + 1) + " ... " + str(len(lns)))

        try:
            html = codecs.open(line2.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            try:
                html = open(line2.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                nError += 1
                print("There was a problem to read the file " + line2 + " in the Description section!")
                # if createLog:
                #     logFile.write(str(nError) + ". There was a problem to read the file " + line2 + " in the Description section.\n")
                continue

        try:
            print(Robinhood_description_parser(soup))
        except:
            traceback.print_exc()
            print("There was a problem to parse the file " + line2 + " in the Description section!")
    '''

    # reading listing pages
    count = 0
    for fileListing in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\08082023\\Listing", '*.html')):
        count += 1
        lines.append(fileListing)
        # if count > 1:
        #     break

    for index, line1 in enumerate(lines):
        print("Reading listing folder of '" + marketPlace + "', file '" + os.path.basename(line1) + "', index= " + str(index + 1) + " ... " + str(len(lines)))
        readError = False
        try:
            html = codecs.open(line1.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            try:
                html = open(line1.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                print("There was a problem to read the file " + line1 + " in the Listing section!")
                readError = True

        if not readError:
            parseError = False
            try:
                test = Robinhood_listing_parser(soup)
                print(Robinhood_listing_parser(soup))
            except:
                traceback.print_exc()
                print("There was a problem to parse the file " + line1 + " in the listing section!")
                parseError = True

    print("DONE")
