This is based on the calsyslab project.

__author__ = 'Helium'

"""
DarkDock Marketplace Crawler (Selenium)
"""

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os, time
from bs4 import BeautifulSoup

from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.DarkDock.parser import darkdock_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://oirolrkrppy6sei6x6bvkkdolc4cjqzqfhxisfzu6exqblahwrrvktyd.onion/'


def startCrawling():
    """Main method for the crawler.

    Opens Tor Browser, crawls the website, parses, then closes Tor.
    """
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlMarket(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


def getMKTName():
    """Returns the name of the website.
    """
    name = 'DarkDock'
    return name


def getFixedURL():
    """Returns the base link of the site.
    """
    url = 'http://oirolrkrppy6sei6x6bvkkdolc4cjqzqfhxisfzu6exqblahwrrvktyd.onion/'
    return url


def closeDriver(driver):
    """Closes Tor Browser.

    Args:
        driver: The selected Selenium driver.
    """
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


def createFFDriver():
    """Creates a Firefox driver and configures its profile to use the Tor proxy and socket.
    """
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
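
# Note: the proxy preferences above assume the Tor Browser bundle is running locally;
# its SOCKS proxy listens on 127.0.0.1:9150 by default. A standalone tor daemon would
# conventionally listen on port 9050 instead, in which case 'network.proxy.socks_port'
# would need to be adjusted.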


def getAccess():
    """The driver 'gets' the url and attempts to access the site.

    Returns:
        A Selenium driver currently on the site or the string 'down' if it can't access the site.
    """
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


def savePage(driver, page, url):
    """Saves the crawled html page.

    Cleans the html of the current page the driver is on. Then saves the current
    crawled html page with its full path name without special characters into the
    marketplace's directory. If the directory path doesn't exist it will make it.

    Args:
        driver: The Selenium driver accessing the page.
        page: The html of the saved page.
        url: The URL of the saved page.
    """
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


def getFullPathName(url):
    """Gets the full path name.

    Gets the full path of the page to be saved along with its appropriate file name.
    Determines which subdirectory to save the page in, based on whether it is a description
    or listing page.

    Args:
        url: The URL of the page.
    """
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath


def getNameFromURL(url):
    """Creates the file name from the passed URL.

    Generates a file name with only its alphanumeric characters.
    If the name isn't unique, it will be given a unique name.

    Args:
        url: The URL of the selected page from the crawler as it crawls through the site.
    """
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
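
# For illustration (hypothetical URL): getNameFromURL('http://example.onion/product/123')
# returns 'httpexampleonionproduct123'; only when no alphanumeric characters remain does
# the global counter supply a fallback name.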


def getInterestedLinks():
    """Returns the list of urls the crawler runs through.

    Returns a list of the different urls of interest that the crawler runs through.
    An example of this can be different categories of a market related to hacking,
    such as Software and Malware, Guides and Tutorials, Digital Products.
    """
    links = []
    categories = [
        'civil_softwares',
        'carding',
        'theft',
        'mining',
        'worms',
        'dump',
        'viruses',
        'trojans',
        'botnets',
        'security_technology',
        'computers',
        'confidential_info',
        'network_services',
        'database',
        'surveillance',
        'digital_forensics',
        '0day',
        'intelligence',
        'private_security'
    ]
    for category in categories:
        links.append(baseURL + "category/" + category)
    return links
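
# Each category resolves to a listing URL under the market's base address, e.g.
# baseURL + "category/carding" ->
# 'http://oirolrkrppy6sei6x6bvkkdolc4cjqzqfhxisfzu6exqblahwrrvktyd.onion/category/carding'.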


def crawlMarket(driver):
    """Crawls and saves each page of a link of interest.

    Accesses, saves, and crawls through each link of interest. For DarkDock, each
    link of interest is a category, so we crawl through all numbered pages of the
    category. We find the URLs of all descriptions/products on the category page and
    save each individual description/product page.

    Args:
        driver: The Selenium driver accessing the site.
    """
    print("Crawling the DarkDock market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        baseCategoryLink = linksToCrawl[i]
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 2

            while has_next_page:
                # Try to access the current link and reload if it fails
                try:
                    driver.get(link)
                except:
                    driver.refresh()

                # Save the html page
                html = driver.page_source
                savePage(driver, html, linksToCrawl[i] + f"page{count}")

                # Parse the product/description links on the listing page
                productLinks = descriptionPages(html)
                for item in productLinks:
                    # Fetch the item URL by joining the base url with the item sub url
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)

                    # Go back to the previous category page
                    driver.back()

                    # # Add a break for testing if we are checking only the first description/product page
                    # break

                # # Add a break for testing based on how many pages to test
                # if count == 3:
                #     break

                # Try to find the next page (categories are paginated as <baseCategoryLink>/<count>/)
                try:
                    link = f"{baseCategoryLink}/{count}/"
                    driver.find_element(By.XPATH, f'//a[@href="{urlparse.urlparse(link).path}"]')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)

        i += 1

    print("Crawling the DarkDock market done.")


def isDescriptionLink(url):
    """Returns whether the url is for a description page.

    Args:
        url: The url of a crawled page.

    Returns:
        Returns 'True' if the url is for a description page. Returns 'False' if the
        url is not for a description page.
    """
    if 'product' in url:
        return True
    return False


def isListingLink(url):
    """Returns whether the url is for a listing page.

    Args:
        url: The url of a crawled page.

    Returns:
        Returns 'True' if the url is for a listing page. Returns 'False' if the
        url is not for a listing page.
    """
    if 'category' in url:
        return True
    return False
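
# For illustration (hypothetical URLs):
#   isDescriptionLink(baseURL + 'product/some-item')  -> True
#   isListingLink(baseURL + 'category/carding')       -> True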


def descriptionPages(html):
    """Returns all product/description links on the current page.

    Passes the html of the category/listing page and parses it for
    any description/product links.

    Args:
        html: The html of the selected category/listing page.
    """
    soup = BeautifulSoup(html, "html.parser")
    return darkdock_links_parser(soup)


def crawler():
    """Starts the crawler.
    """
    startCrawling()
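

# Minimal entry-point sketch (assumption: in the full project this module is normally
# driven by the MarketPlaces initialization/mining scripts rather than run directly).
if __name__ == '__main__':
    crawler()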