# Based on the CalSysLab project.
__author__ = 'Helium'

'''
BlackPyramid Marketplace Crawler (Selenium)
Cannot be used because the site exposes no crawlable links;
kept in case that issue is resolved.
'''
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.BlackPyramid.parser import blackpyramid_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/login/'

# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; crawler() at the end of this file calls it.
def startCrawling():
    # opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, baseURL, True)

# Opens Tor Browser and prompts for an ENTER input to continue
def opentor():
    from MarketPlaces.Initialization.markets_mining import config
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return

# Returns the name of the website
# return: name of the site as a string
def getMKTName():
    name = 'BlackPyramid'
    return name

# Returns the base link of the website
# return: url of the base site as a string
def getFixedURL():
    url = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/'
    return url

# Closes Tor Browser
# @param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return

# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 2)      # do not load images
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)              # manual proxy configuration
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)     # Tor Browser's default SOCKS port
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
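
# Note: recent Selenium 4 releases removed the firefox_binary/firefox_profile
# keyword arguments used above. A minimal sketch of the equivalent Options-based
# setup; the function name is hypothetical and nothing in this crawler calls it:
def createFFDriverWithOptions():
    from MarketPlaces.Initialization.markets_mining import config
    from selenium.webdriver.firefox.options import Options

    opts = Options()
    opts.binary_location = config.get('TOR', 'firefox_binary_path')       # Tor Browser binary
    opts.profile = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # the same set_preference calls as in createFFDriver would still apply here

    service = Service(config.get('TOR', 'geckodriver_path'))
    return webdriver.Firefox(service=service, options=opts)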

# The driver 'gets' the url and attempts to access the site; returns 'down' on failure
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'

# Manual captcha solver: waits for the login page to load, clicks through to the
# login form, enters the credentials, then pauses until the CAPTCHA has been
# solved manually in the browser
# @param: current selenium web driver
def login(driver):
    # wait for login page
    login_link = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[3]/div/main/div/div/div/div[2]/div/div/div/section[1]/input[1]')
    login_link.click()  # open tab with url

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('ChipotleSteakBurrito')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('BlackBeans')

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[2]/form/nav/nav/ul/li[2]/div/a/span[1]')))
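
# Some crawlers in this project crop a screenshot of the CAPTCHA for manual
# solving, which is what the otherwise-unused PIL import above supports. A
# minimal sketch, assuming a hypothetical //img[@class="captcha"] locator:
#
#   driver.save_screenshot('page.png')
#   captcha = driver.find_element(by=By.XPATH, value='//img[@class="captcha"]')
#   left, top = captcha.location['x'], captcha.location['y']
#   box = (left, top, left + captcha.size['width'], top + captcha.size['height'])
#   Image.open('page.png').crop(box).show()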

# Saves the crawled html page, making the directory path for html pages if not yet made
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:   # close the handle instead of leaking it
        file.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    # build the path portably instead of hard-coding Windows backslashes
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
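
# e.g. with shared_folder '/data' (an illustrative value), a listing page lands at
#   /data/MarketPlaces/BlackPyramid/HTML_Pages/<CURRENT_DATE>/Listing/<fileName>.html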

# Creates the file name from the passed URL, falling back to a unique counter
# value when nothing remains of the url after cleaning
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
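
# e.g. getNameFromURL('http://example.onion/products/42') returns
# 'httpexampleonionproducts42' (illustrative url; all non-alphanumerics are stripped)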

# Returns the list of urls of interest; the crawler runs through this list.
# In this example each url is a product category,
# e.g. Guides and Tutorials, Digital Products, and Software and Malware
def getInterestedLinks():
    links = []

    # Hacking Guides
    links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # Exploits
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # botnets/malware
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # fraud software
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # Other Tools
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # Services
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')

    return links

# Gets the links of interest and iterates through them; each link is opened and
# crawled, and both listing and description pages are saved along the way
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the BlackPyramid market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    clicker = driver.find_element(by=By.XPATH, value='/html/body/div[2]/form/nav/nav/ul/li[2]/div/a')
                    clicker.click()  # open tab with url
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)  # renamed from 'list' to avoid shadowing the builtin
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # stop after the first product while testing; comment out for a full crawl
                    break

                # stop after the second listing page while testing; comment out for a full crawl
                if count == 1:
                    break

                try:
                    # the next-page control is a form input with no href to follow;
                    # find_element raises NoSuchElementException when it is absent
                    clicker = driver.find_element(by=By.XPATH, value=
                        '/html/body/center/div[4]/div/div[3]/div[23]/div[2]/input[1]')
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the BlackPyramid market done.")
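
# As written, the pagination loop above never actually advances: each iteration
# re-gets the same `link`, and the next-page control is only located, not clicked
# (the module docstring notes the site exposes no crawlable links). A minimal
# sketch of button-based pagination, assuming the input submits the page form
# when clicked and the same XPath still matches:
#
#   clicker.click()                                    # advance to the next page
#   WebDriverWait(driver, 30).until(EC.staleness_of(clicker))
#   html = driver.page_source                          # save this page directly
#   savePage(driver, html, link)                       # instead of re-getting link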

# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if it is a description page, false if not
def isDescriptionLink(url):
    if 'products' in url:
        return True
    return False

# Returns 'True' if the link is a listing page link
# @param: url of any url crawled
# return: true if it is a listing page, false if not
def isListingLink(url):
    if 'search' in url:
        return True
    return False
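
# e.g. isDescriptionLink('.../products/123') -> True and
# isListingLink('.../search/results/') -> True; both are plain substring checks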

# Calls the parser to extract the description links from a listing page
# @param: html source of a page from the interested link list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return blackpyramid_links_parser(soup)

def crawler():
    startCrawling()
    # print("Crawling and Parsing BlackPyramid .... DONE!")