__author__ = 'DarkWeb'

'''
DarkFox Marketplace Crawler (Selenium)
Based on the calsyslab project.
'''
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, time
import subprocess
from bs4 import BeautifulSoup

from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.DarkFox.parser import darkfox_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML
counter = 1
baseURL = 'http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/'
# Opens Tor Browser, crawls the website, parses the results, then closes Tor.
# Acts as the main entry point of the crawler; the crawler() function at the
# end of this file calls it.
def startCrawling():
    opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            captcha(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, baseURL, False)
# Opens Tor Browser and prompts for ENTER input before continuing
def opentor():
    from MarketPlaces.Initialization.markets_mining import config

    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
# Returns the name of the website
# return: name of the site as a string
def getMKTName():
    name = 'DarkFox'
    return name
# Returns the credentials needed for the market
def getCredentials():
    credentials = 'blank blank blank blank cap 0'
    return credentials
# Returns the base link of the website
# return: url of the base site as a string
def getFixedURL():
    url = 'http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/'
    return url
# Closes Tor Browser
# @param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # ff_prof.set_preference("permissions.default.image", 2)
    # ff_prof.set_preference("browser.download.folderList", 2)
    # ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    # ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    # 9150 is Tor Browser's default SOCKS port (the standalone tor daemon uses 9050)
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
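

# Untested alternative, not part of the original project: newer Selenium 4
# releases removed the 'firefox_binary' and 'firefox_profile' keyword
# arguments used above in favor of FirefoxOptions. If createFFDriver() stops
# working after a Selenium upgrade, this sketch shows the equivalent setup
# (note: it sets the proxy preferences directly instead of loading the
# Tor Browser profile).
def createFFDriverWithOptions():
    from MarketPlaces.Initialization.markets_mining import config

    options = webdriver.FirefoxOptions()
    options.binary_location = config.get('TOR', 'firefox_binary_path')
    # same Tor SOCKS proxy preferences as in createFFDriver()
    options.set_preference('network.proxy.type', 1)
    options.set_preference('network.proxy.socks_version', 5)
    options.set_preference('network.proxy.socks', '127.0.0.1')
    options.set_preference('network.proxy.socks_port', 9150)
    options.set_preference('network.proxy.socks_remote_dns', True)
    options.set_preference('javascript.enabled', False)

    service = Service(config.get('TOR', 'geckodriver_path'))
    return webdriver.Firefox(service=service, options=options)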
# The driver 'gets' the url, attempting to access the site
# return: the selenium driver, or the string 'down' if the site is unreachable
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        driver.close()
        return 'down'
# Manual captcha solver: waits for a specific element so that the whole page
# loads, finds the input box, screenshots the captcha image, then allows for
# manual solving of the captcha in the terminal
# @param: current selenium web driver
def captcha(driver):
    # wait for the captcha page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located((By.XPATH, "/html/body/div/div/form/button[1]")))

    # save the captcha image locally
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/div[1]/div[1]").screenshot(r'..\DarkFox\captcha.png')

    # show the image in the default image viewer
    im = Image.open(r'..\DarkFox\captcha.png')
    im.show()

    # find the solution input box
    inputBox = driver.find_element(by=By.XPATH, value="/html/body/div/div/form/div[1]/div[2]/input")

    # ask the user to type the captcha solution in the terminal
    userIn = input("Enter solution: ")

    # send the user's solution into the input box
    inputBox.send_keys(userIn)

    # click the verify (submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div/div/form/button[1]").click()

    # wait for the listing page to show up (this XPath may need to change for a different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/main/div/div/div[2]/div[1]/div[1]/form/div[1]/h1")))
# Saves the crawled html page, creating the directory path for html pages if needed
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as the crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
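
# With the directory layout above, a listing page crawled on CURRENT_DATE is
# saved under <shared_folder>/MarketPlaces/DarkFox/HTML_Pages/<CURRENT_DATE>/Listing/
# and a product page under the sibling Description folder.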
# Creates the file name from the passed URL, falling back to a distinct
# counter-based name if nothing usable remains after cleaning
# @param: raw url as the crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
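
# For example, getNameFromURL('http://example.onion/product/a1-b2') keeps only
# the alphanumeric characters and returns 'httpexampleonionproducta1b2'.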
# Returns the list of urls of interest; the crawler runs through this list.
# In this example the urls are product categories, such as
# Guides and Tutorials, Digital Products, and Software and Malware.
def getInterestedLinks():
    links = []

    # # Guides and Tutorials
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/30739153-1fcd-45cd-b919-072b439c6e06')
    # # Digital Products
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/0e384d5f-26ef-4561-b5a3-ff76a88ab781')
    # Software and Malware
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/6b71210f-f1f9-4aa3-8f89-bd9ee28f7afc')
    links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/6b71210f-f1f9-4aa3-8f89-bd9ee28f7afc?page=15')
    # # Services
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/b9dc5846-5024-421e-92e6-09ba96a03280')
    # # Miscellaneous
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/fd1c989b-1a74-4dc0-92b0-67d8c1c487cb')
    # # Hosting and Security
    # links.append('http://57d5j6bbwlpxbxe5tsjjy3vziktv3fo2o5j3nheo4gpg6lzpsimzqzid.onion/category/5233fd6a-72e6-466d-b108-5cc61091cd14')

    return links
# Iterates through the list of links of interest; each link is visited and
# both its listing and description pages are saved
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the DarkFox market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except Exception:
                    driver.refresh()
                html = driver.page_source
                savePage(html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except Exception:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                    # testing limit: stop after the first product (comment out to crawl all)
                    break

                # testing limit: stop after the first listing page (comment out to crawl all)
                if count == 0:
                    break

                try:
                    link = driver.find_element(by=By.XPATH, value=
                        '/html/body/main/div/div[2]/div/div[2]/div/div/div/nav/a[2]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    input("Crawling the DarkFox market done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if it is a description page, false if not
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False
# Returns 'True' if the link is a listing page link
# @param: url of any url crawled
# return: true if it is a listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False
# Calls the parser to extract the description links from a listing page
# @param: html source of a page from the interested link list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return darkfox_links_parser(soup)
# Drops links that sign out of the site
def isSignOut(url):
    # absURL = urlparse.urljoin(url.base_url, url.url)
    if 'signout' in url.lower() or 'logout' in url.lower():
        return True
    return False
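
# For example, isSignOut('http://example.onion/accounts/LOGOUT') returns True;
# the match is case-insensitive, so the crawler can skip any link that would
# end its session.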
def crawler():
    startCrawling()
    # print("Crawling and Parsing DarkFox .... DONE!")
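

# Optional addition, not in the original: the mining framework imports this
# module and calls crawler() itself, but this guard also allows running the
# module directly, assuming the MarketPlaces package is on the import path.
if __name__ == '__main__':
    crawler()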