This is based on the calsyslab project.

__author__ = 'DarkWeb'

'''
HiddenMarket Market Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, re, time
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.HiddenMarket.parser import hiddenmarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/'


# Opens Tor Browser, crawls the website
def startCrawling():
    marketName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(marketName, baseURL, True)


# Login using premade account credentials and do the login captcha manually
def login(driver):
    # wait for the login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[3]/div[3]")))

    # entering username and password into the input boxes
    # usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    # usernameBox.send_keys('ct1234')
    # passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    # passwordBox.send_keys('DementedBed1230')

    '''
    # wait for the captcha page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/main/div/div/div/div/div/form/div[3]/div/div[1]/label/img")))

    # save the captcha to a local file
    driver.find_element(by=By.XPATH, value='/html/body/main/div/div/div/div/div/form/div[3]/div/div[1]/label/img').screenshot(
        r'..\captcha.png')

    # this method will show the image in any image viewer
    im = Image.open(r'..\captcha.png')
    im.show()

    # wait until the input space shows up
    inputBox = driver.find_element(by=By.XPATH, value='//*[@id="captcha"]')

    # ask the user to input the captcha solution in the terminal
    userIn = input("Enter solution: ")

    # send the user's solution into the input space
    inputBox.send_keys(userIn)

    # click the verify (submit) button
    driver.find_element(by=By.XPATH, value="/html/body/main/div/div/div/div/div/form/div[4]/button").click()
    '''

    # input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed URL)
    # WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
    #     (By.XPATH, '/html/body/main/div/div/div[1]/div/div[1]/div/h5')))
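
# Note: as written, login() does not actually submit credentials or a CAPTCHA
# solution; the credential-entry and CAPTCHA-solving steps are commented out,
# so the function only waits for the login page element to become visible.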


# Returns the name of the website
def getMKTName():
    name = 'HiddenMarket'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.quit()
    time.sleep(3)
    return


# Creates FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
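
# For reference, a minimal sketch of the config entries read above. Only the
# section and key names come from the config.get() calls in this file; the
# paths themselves are illustrative assumptions:
#
#   [TOR]
#   firefox_binary_path = C:\Tor Browser\Browser\firefox.exe
#   firefox_profile_path = C:\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default
#   geckodriver_path = C:\geckodriver.exe
#
#   [Project]
#   shared_folder = C:\shared_folder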


# Opens the seed URL; returns the driver on success or the string 'down' on failure
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
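
# Example of the resulting layout (shared_folder and CURRENT_DATE values are
# illustrative):
#   <shared_folder>/MarketPlaces/HiddenMarket/HTML_Pages/<CURRENT_DATE>/Listing/<name>.html
#   <shared_folder>/MarketPlaces/HiddenMarket/HTML_Pages/<CURRENT_DATE>/Description/<name>.html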


# Creates the file name from passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
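
# e.g. 'http://mipx...onion/category/hacks' collapses to 'httpmipxonioncategoryhacks'
# (every non-alphanumeric character is dropped; the global counter is only used
# as a fallback name when nothing alphanumeric is left)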


def getInterestedLinks():
    links = []

    # # Civil Software
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/civil_softwares')
    # # Tutorials - Carding
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/carding')
    # Digital - Hacks
    links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/hacks')
    # # Digital - Exploit Kit
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/exploit_kit')
    # # 0Day
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/0day')
    # # Digital Forensics
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/digital_forensics')
    # # Tutorials - Mining
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/mining')
    # # Tutorials - Worms
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/worms')
    # # Tutorials - Viruses
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/viruses')
    # # Tutorials - Trojans
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/trojans')
    # # Tutorials - Botnets
    # links.append('http://mipx6eedtsvfgfcmbm3utjedgjez2w4dzjdrbhtd2mt3cicwhhzspxqd.onion/category/botnets')

    return links
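
# Only the Digital - Hacks category is enabled above; uncommenting the other
# links.append(...) lines widens the crawl to the remaining categories.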


def crawlForum(driver):
    print("Crawling the HiddenMarket market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        categoryLink = link
        maxNumPages = 0  # temp value.
        try:
            has_next_page = True
            count = 0
            pageCount = 1

            while has_next_page:
                try:
                    driver.get(link)
                    if pageCount == 1:
                        maxNumPages = int(driver.find_element(by=By.CLASS_NAME, value='main')
                                          .find_element(by=By.CLASS_NAME, value='pages')
                                          .find_elements(by=By.CLASS_NAME, value='page')[-1].text)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out
                    # break

                # comment out
                if count == 1:
                    break

                try:
                    pageCount += 1
                    if pageCount > maxNumPages:
                        raise NoSuchElementException
                    pageLink = "/" + str(pageCount) + "/"
                    link = categoryLink + pageLink
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the HiddenMarket market done.")


# Returns 'True' if the link is a Topic link
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False


# Returns True if the link is a listingPage link
def isListingLink(url):
    if 'category' in url:
        return True
    return False
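
# e.g. (illustrative URLs, based on the substring checks above)
#   isDescriptionLink('http://mipx...onion/product/some-item')  -> True
#   isListingLink('http://mipx...onion/category/hacks')         -> True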


# calling the parser to define the links
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return hiddenmarket_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing HiddenMarket .... DONE!")
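
# crawler() is presumably invoked by the project's mining entry point; a minimal
# guard like this (not in the original file) would also let the module run standalone:
if __name__ == '__main__':
    crawler()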