This crawler is based on the calsyslab project.


__author__ = 'Helium'

'''
Nexus Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Nexus.parser import nexus_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion'
# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it.
def startCrawling():
    # opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, baseURL, True)
# Opens Tor Browser and prompts for an ENTER press before continuing.
def opentor():
    from MarketPlaces.Initialization.markets_mining import config
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
# Returns the name of the website
# return: name of the site as a string
def getMKTName():
    name = 'Nexus'
    return name


# Returns the base link of the website
# return: URL of the base site as a string
def getFixedURL():
    url = 'http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion'
    return url
# Closes Tor Browser
# @param: current Selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates a FireFox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # Minimize traces left on disk: no history, clear everything on shutdown
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # Block all image loading (2 = block) to speed up crawling
    ff_prof.set_preference("permissions.default.image", 2)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    # Route all traffic through the local Tor SOCKS5 proxy (Tor Browser listens on 9150)
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
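# Note: Selenium 4 deprecates the firefox_binary and firefox_profile keyword
# arguments used above. A minimal sketch of the equivalent setup with the
# Options API (an assumption, not part of the original project):
#
#   from selenium.webdriver.firefox.options import Options
#   opts = Options()
#   opts.binary_location = config.get('TOR', 'firefox_binary_path')
#   opts.profile = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
#   driver = webdriver.Firefox(service=service, options=opts)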
# The driver 'gets' the URL; if the site can't be accessed, returns the string 'down'
# return: the Selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Saves a cleaned copy of the crawled page to disk
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved, along with its appropriate file name
# @param: raw URL as the crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isListingLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    return fullPath
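# For illustration (hypothetical config values): with shared_folder set to
# '/data' and CURRENT_DATE '01012024', a listing page would be saved to
#   /data/MarketPlaces/Nexus/HTML_Pages/01012024/Listing/<fileName>.html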
# Creates the file name from the passed URL, falling back to a global counter
# when no alphanumeric characters survive the cleaning
# @param: raw URL as the crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
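# A quick sketch of the cleaning above (hypothetical URL, for illustration
# only): every non-alphanumeric character is dropped, so
#   getNameFromURL('http://example.onion/produto/foo-bar/')
# returns 'httpexampleonionprodutofoobar'.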
# Returns the list of URLs of interest; the crawler runs through this list.
# In this example, the links are product categories such as malware,
# hacking and spam, hacking/programming/remote-admin services, and
# guides and tutorials for hacking, malware, and fraud.
def getInterestedLinks():
    links = []

    # malware
    links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/malware/')
    # # hacking-spam
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/hacking-spam/')
    # # hacking services
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/servicos/hacking/')
    # # programming services
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/servicos/programacao/')
    # # remote admin services
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/servicos/administracao-remota/')
    # # hacking guides
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/guias-tutoriais/guia-de-hacking/')
    # # malware guides
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/guias-tutoriais/guia-de-malware/')
    # # fraud guides
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/guias-tutoriais/guia-de-fraudes/')
    # # fraud software
    # links.append('http://nexus2bmba34euohk3xo7og2zelkgbtc2p7rjsbxrjjknlecja2tdvyd.onion/categoria-produto/fraudes/software-de-fraude/')

    return links
# Iterates through the list of interested links; each link is visited and crawled.
# Both listing and description pages are crawled here, and both types of pages are saved.
# @param: Selenium driver
def crawlForum(driver):
    print("Crawling the Nexus market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out
                    # break

                # comment out
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/div/main/nav/ul/li[3]/a').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Nexus market done.")
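# The absolute XPath above targets the "next page" anchor in the pagination
# nav and breaks easily if the page layout shifts. A sketch of a less brittle
# selector (an assumption: that the market's WooCommerce-style pagination uses
# the standard 'next page-numbers' class on its next-page link):
#
#   link = driver.find_element(By.CSS_SELECTOR, 'a.next.page-numbers').get_attribute('href')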
# Returns 'True' if the link is a description link
# @param: URL of any page crawled
# return: True if it is a description page, False if not
def isDescriptionLink(url):
    if 'produto' in url:
        return True
    return False


# Returns True if the link is a listing page link
# @param: URL of any page crawled
# return: True if it is a listing page, False if not
def isListingLink(url):
    if 'categoria-produto' in url:
        return True
    return False
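# Note that 'categoria-produto' contains 'produto' as a substring, so
# isDescriptionLink() also returns True for listing URLs; code that needs to
# tell the two apart (like getFullPathName above) must check isListingLink()
# first. For illustration:
#   isListingLink(baseURL + '/categoria-produto/malware/')   ->  True
#   isDescriptionLink(baseURL + '/categoria-produto/malware/')  ->  True (substring match)
#   isDescriptionLink(baseURL + '/produto/some-item/')       ->  True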
# Calls the parser to extract the description links; the html is the page source
# of a link from the interested-links list
# @param: html of a page from the interested-links list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return nexus_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Nexus .... DONE!")
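# A minimal entry point for running this module directly (an assumption; in
# the original project, crawler() is invoked by the MarketPlaces
# initialization code rather than run standalone):
#
# if __name__ == '__main__':
#     crawler()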