# Based on the CalSysLab project.
__author__ = 'Helium'

'''
DarkMarket Marketplace Crawler (Selenium)
Incomplete: pagination through multiple listing pages still needs work.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os
import time
from bs4 import BeautifulSoup

from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.DarkMarket.parser import darkmarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/'


# Opens Tor Browser, crawls the website, then parses the pages and closes Tor.
# Acts as the main method for the crawler; crawler() at the bottom of this file calls it.
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
# return: name of the site as a string
def getMKTName():
    name = 'DarkMarket'
    return name


# Returns the base link of the website
# return: url of the base site as a string
def getFixedURL():
    url = 'http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
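
# Note: driver.close() only closes the current window. driver.quit() would end the
# whole WebDriver session (including geckodriver); quit() may be the safer teardown
# if stray browser processes are observed between runs.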


# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket.
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)  # block images to speed up page loads
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)  # Tor Browser's default SOCKS port
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
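
# NOTE (hedged): the firefox_binary/firefox_profile keyword arguments used above were
# deprecated in Selenium 4 and removed in 4.10+. If this project upgrades Selenium, a
# minimal sketch of the equivalent setup inside createFFDriver() would be:
#
#     from selenium.webdriver.firefox.options import Options
#     opts = Options()
#     opts.binary_location = config.get('TOR', 'firefox_binary_path')
#     opts.profile = ff_prof  # the same FirefoxProfile built above
#     driver = webdriver.Firefox(service=service, options=opts)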


# The driver 'gets' the url and attempts to access the site.
# return: the selenium driver, or the string 'down' if the site is unreachable
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        driver.close()
        return 'down'


# Saves the crawled html page, creating the directory path for html pages if needed
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as the crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isListingLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    elif isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
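
# For illustration (hypothetical shared_folder of '/data'), a listing page saved today
# would land at roughly:
#   /data/MarketPlaces/DarkMarket/HTML_Pages/<CURRENT_DATE>/Listing/<fileName>.html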


# Creates the file name from the passed URL by keeping only its alphanumeric
# characters; falls back to a global counter when nothing usable remains.
# @param: raw url as the crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
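
# Example: getNameFromURL('http://example.onion/product/abc-1/') returns
# 'httpexampleonionproductabc1' (every non-alphanumeric character stripped).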


# Returns the list of urls of interest; the crawler runs through this list.
# In this example the active entries are product categories:
# Softwares, Guides and Training, and Tools and Other Accounts.
def getInterestedLinks():
    links = []

    # Home (for testing the next button)
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/')
    # Softwares
    links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/softwares/')
    # Guides and Training
    links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/guides-and-training/')
    # Tools and Other Accounts
    links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/tools-and-other-accounts/')
    # PayPal Transfers
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/paypal-transfers/')
    # Cash App Transfers
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/cash-app-transfers/')
    # Bank Logs
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/bank-logs-%f0%9f%a4%91/')
    # Carded Products
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/carded-products-%f0%9f%92%8e/')
    # Cloned Cards
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/cloned-cards/')
    # Credit Card CVV Pack
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/credit-card-cvv-pack/')
    # Dumps
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/dumps/')
    # Credit Cards Fullz CC
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/credit-cards-fullz-cc/')
    # Equipment
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/equipment/')
    # Gift Cards
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/gift-cards/')
    # PayPal Accounts with Balance
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/paypal-accounts-with-ballance/')
    # Visa Prepaid Cards
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/visa-prepaid-crads/')
    # Western Union Transfers
    # links.append('http://wutrspt5x47ozzkxyvmrdcclp52upd56klbtqu5aefxbgkmz5ptiviyd.onion/product-category/western-union-transfer-%f0%9f%92%b1/')

    return links


# Iterates through the list of links of interest; each link is visited and crawled.
# Both listing and description pages are crawled and saved here.
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the DarkMarket market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except Exception:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, linksToCrawl[i] + f"page{count+1}")

                productLinks = productPages(html)
                for item in productLinks:
                    # resolve the (possibly relative) product link against the base URL
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except Exception:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # uncomment to stop after the first product while testing
                    # break

                # uncomment to stop after two pages while testing
                # if count == 1:
                #     break

                # Find the next-page link (XPath not yet verified against the live site)
                try:
                    link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/div/div/div[2]/div/div/main/nav/ul/li[3]/a').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the DarkMarket market done.")
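
# NOTE (hedged): the absolute XPath above is brittle. The 'product-category' slugs
# suggest a WooCommerce-style storefront, which conventionally marks its pagination
# link with the classes 'next page-numbers'; if that assumption holds, a sturdier
# lookup would be:
#
#     link = driver.find_element(By.CSS_SELECTOR, 'a.next.page-numbers').get_attribute('href')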


# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if it is a description page, false if not
def isDescriptionLink(url):
    # exclude listing links, whose 'product-category' slug also contains 'product'
    if 'product' in url and 'product-category' not in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
# @param: url of any url crawled
# return: true if it is a listing page, false if not
def isListingLink(url):
    if 'product-category' in url:
        return True
    return False
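
# Example (hypothetical paths on the market's host):
#   .../product-category/softwares/  -> listing page
#   .../product/some-item/           -> description page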


# Calls the parser to extract description links from the html of a listing page.
# @param: html of a page from the interested link list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return darkmarket_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing DarkMarket .... DONE!")
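

# Optional direct entry point (an addition; assumes this module may also be run
# standalone rather than only through the project's mining pipeline):
if __name__ == '__main__':
    crawler()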