This crawler is based on the calsyslab project.
__author__ = 'Helium'

'''
Kerberos Market Crawler (Selenium)
Note: the site is able to catch crawlers, so logins and CAPTCHAs are handled manually.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup

from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Kerberos.parser import kerberos_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion'


# Opens Tor Browser and crawls the website
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    # new_parse(mktName, baseURL, True)


# the CAPTCHAs are solved manually; wait for the user to confirm
def captcha(driver):
    input('Complete the CAPTCHAs manually, then press ENTER when completed')


def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Logs in using premade account credentials; the login CAPTCHA is solved manually
def login(driver):
    captcha(driver)

    # wait for the login page
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[1]/div[2]")))

    input("There may be an Enter button you need to press.\nPress it now, then press ENTER on the keyboard")

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[1]')
    # username here
    usernameBox.send_keys('blabri')
    passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[2]')
    # password here
    passwordBox.send_keys('fishowal')

    input('Complete the CAPTCHA, press Login, and then press ENTER on the keyboard')

    # wait for the captcha page to show up
    # WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
    #     (By.XPATH, "/html/body/div/img[24]")))
    # time.sleep(10)

    # save the captcha locally
    # driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/div[6]').screenshot(
    #     r'..\Kerberos\captcha.png')

    # # this method shows the image in any image viewer
    # im = Image.open(r'..\Kerberos\captcha.png')
    # im.show()

    # # wait until the input box shows up
    # inputBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div/form/input[3]')
    # # ask the user to type the captcha solution in the terminal
    # userIn = input("Enter solution: ")

    # # send the user's solution into the input box
    # inputBox.send_keys(userIn)

    # # click the verify (submit) button
    # driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # driver.find_element(by=By.XPATH, value="/html/body/div[1]/div[2]/div/form/div[10]/button").click()

    # # wait for the listing page to show up (this XPath may need to change based on the seed URL)
    # WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
    #     (By.XPATH, '//*[@id="breadcrumb"]')))


# Returns the name of the website
def getMKTName():
    name = 'Kerberos'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion'
    return url


# Closes Tor Browser (unlike closeDriver, this quits the whole browser process)
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.quit()
    time.sleep(3)
    return


# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver
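
# Note (an assumption about the environment, not part of the original flow):
# Selenium 4 deprecates FirefoxBinary and the firefox_binary/firefox_profile
# keyword arguments. A minimal sketch of the equivalent setup under Selenium 4.x:
#
#   from selenium.webdriver.firefox.options import Options
#   options = Options()
#   options.binary_location = config.get('TOR', 'firefox_binary_path')
#   options.profile = ff_prof
#   driver = webdriver.Firefox(service=service, options=options)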


def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
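
# For illustration (the concrete values are assumptions; shared_folder and
# CURRENT_DATE come from the project config): a description link would land at
#   <shared_folder>/MarketPlaces/Kerberos/HTML_Pages/<CURRENT_DATE>/Description/<fileName>.html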


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
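
# For example, getNameFromURL('http://example.onion/item/123') keeps only the
# alphanumeric characters and returns 'httpexampleonionitem123'
# (hypothetical URL; real names are built from the full .onion link)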


def getInterestedLinks():
    links = []

    # Services - Hacking
    links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/99/block/price-none/ww/ww/1/')
    # # Tutorials - Hacking
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/122/block/price-none/ww/ww/1/')
    # # Tutorials - Guides
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/124/block/price-none/ww/ww/1/')
    # # Tutorials - Other
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/126/block/price-none/ww/ww/1/')
    # # Software and Malware - Botnets
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/129/block/price-none/ww/ww/1/')
    # # Software and Malware - Malware
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/130/block/price-none/ww/ww/1/')
    # # Software and Malware - Trojans
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/131/block/price-none/ww/ww/1/')
    # # Software and Malware - Exploits / Kits
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/133/block/price-none/ww/ww/1/')
    # # Software and Malware - Other
    # links.append('http://kerberosazmnfrjinmftp3im3cr7hw4nxbavm4ngofn64g24be7h3kqd.onion/categories/136/block/price-none/ww/ww/1/')

    return links


def crawlForum(driver):
    print("Crawling the Kerberos market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()
                    time.sleep(5)

                    # uncomment to only visit the first item on each page (testing)
                    # break

                # comment out the block below to crawl all listing pages
                if count == 1:
                    break

                try:
                    # the 'Next' anchor index inside the pagination container varies
                    # (e.g. .../div[28]/a[15] or .../div[28]/a[3]), so it is located by link text
                    nav = driver.find_element(by=By.XPATH, value='/html/body/div[4]/div[4]/div[4]/div/div[1]/div[28]')
                    a = nav.find_element(by=By.LINK_TEXT, value="Next")
                    link = a.get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    input("Crawling the Kerberos market done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a description (product item) link
def isDescriptionLink(url):
    if 'item' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
def isListingLink(url):
    if 'categories' in url:
        return True
    return False
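
# For example (illustrative item id):
#   isDescriptionLink(baseURL + '/item/12345') -> True
#   isListingLink(baseURL + '/categories/99/block/price-none/ww/ww/1/') -> True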


# calls the parser to extract the product links from a listing page
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return kerberos_links_parser(soup)
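
# Assumption: kerberos_links_parser returns the product-page hrefs found in the
# listing HTML; crawlForum joins each one against baseURL before visiting it.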


def crawler():
    startCrawling()
    # print("Crawling and Parsing Kerberos .... DONE!")