This crawler is based on the calsyslab project.

__author__ = 'Helium'

'''
Anon Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.AnonMarket.parser import AnonMarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion'


# Opens Tor Browser, crawls the website, parses the results, then closes Tor.
# Acts as the main method for the crawler; crawler() at the end of this file calls it.
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)


# Returns the name of the website
# return: name of site in string type
def getMKTName():
    name = 'AnonMarket'
    return name


# Returns the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
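
# Note: Selenium 4.10+ removed the firefox_binary and firefox_profile keyword
# arguments used above. A minimal sketch of the equivalent setup on a newer
# Selenium, assuming the same config values (not tested against this project):
#
#   from selenium.webdriver.firefox.options import Options
#   opts = Options()
#   opts.binary_location = config.get('TOR', 'firefox_binary_path')
#   opts.profile = ff_prof
#   driver = webdriver.Firefox(service=service, options=opts)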


# The driver 'gets' the url and attempts to access the site; returns 'down' on failure
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Cleans the given page's html and saves it under the path built by getFullPathName()
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
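
# Example (illustrative, assuming shared_folder is '/data' and CURRENT_DATE is '2023_11_01'):
#   /data/MarketPlaces/AnonMarket/HTML_Pages/2023_11_01/Listing/<fileName>.html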


# Creates the file name from the passed URL; falls back to a distinct counter-based
# name if nothing alphanumeric remains after cleaning
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
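
# Example (illustrative): getNameFromURL('http://example.onion/category/malware')
# returns 'httpexampleonioncategorymalware'; only alphanumeric characters survive.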


# Returns the list of category URLs of interest; the crawler runs through this list.
# Each link is a product category on the market, e.g. Malware, Bootkits, Ransomware.
def getInterestedLinks():
    links = []

    # Malware
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/malware')
    # Bootkits
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/bootkits')
    # Backdoors
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/backdoors')
    # Keyloggers
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/keyloggers')
    # Wireless Trackers
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/wireless_trackers')
    # Screen Scrapers
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/screen_scrapers')
    # Mobile Forensic Tools
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/mobile_forensics_tools')
    # Wifi Jammers
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/wifi_jammers')
    # Carding
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/carding')
    # Worms
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/worms')
    # Viruses
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/viruses')
    # Trojans
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/trojans')
    # Botnets
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/botnets')
    # Security Technology
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/security_technology')
    # Hacks
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/hacks')
    # Exploit kits
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/exploit_kit')
    # Security
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/security')
    # Ransomware
    links.append('http://2r7wa5og3ly4umqhmmqqytae6bufl5ql5kz7sorndpqtrkc2ri7tohad.onion/category/ransomware')

    return links


# Iterates through the links of interest; each listing page is visited and saved,
# along with every product (description) page it links to
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the Anon Market")

    linksToCrawl = getInterestedLinks()

    for link in linksToCrawl:
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                # Get all product links on the current page
                products_list = productPages(html)
                for item in products_list:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()  # Go back to listing after visiting each product

                    # # comment out
                    # break

                # # comment out
                # if count == 1:
                #     break

                # Locate the next page link
                try:
                    # Find the active page number
                    active_page_element = driver.find_element(By.XPATH, '//div[@class="page activepage"]')
                    # current_page = int(active_page_element.text)

                    next_page_element = active_page_element.find_element(By.XPATH, 'following-sibling::a[1]')
                    link = next_page_element.get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)

    print("Crawling the Anon Market done.")
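
# WebDriverWait and EC are imported above but unused; a minimal sketch of how an
# explicit wait could guard the pagination lookup in crawlForum (an assumption,
# not part of the original flow):
#
#   WebDriverWait(driver, 30).until(EC.presence_of_element_located(
#       (By.XPATH, '//div[@class="page activepage"]')))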


# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
# @param: url of any url crawled
# return: true if is a listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False
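
# Example (illustrative): isDescriptionLink(baseURL + '/product/xyz') is True;
# isListingLink(baseURL + '/category/malware') is True.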


# Calls the parser to extract the product (description) links from a listing page
# @param: html page source of a listing page from getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return AnonMarket_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing AnonMarket .... DONE!")
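
# A minimal entry-point sketch (assumed; in this project crawler() appears to be
# invoked by the MarketPlaces.Initialization mining pipeline rather than directly):
#
#   if __name__ == '__main__':
#       crawler()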