__author__ = 'Helium'

'''
BlackPyramid Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.BlackPyramid.parser import blackpyramid_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1
baseURL = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/login/'
# Opens Tor Browser, crawls the website, parses the results, then closes Tor.
# Acts as the main method for the crawler; crawler() at the end of this file calls it.
def startCrawling():
    opentor()
    # mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(forumName, baseURL, False)
# Opens Tor Browser
#prompts for ENTER input to continue
def opentor():
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
# Returns the name of the website
#return: name of site in string type
def getMKTName():
    name = 'BlackPyramid'
    return name


# Returns the base link of the website
#return: url of base site in string type
def getFixedURL():
    url = 'http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/'
    return url
# Closes Tor Browser
#@param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 2)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))
    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    return driver
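

# A minimal sanity-check sketch (not part of the original flow): builds a driver
# with the profile above and loads the official Tor check page to verify that
# traffic is actually routed through Tor. The function name and the page_source
# heuristic are assumptions for illustration; nothing in the crawler calls this.
def checkTorRouting():
    driver = createFFDriver()
    try:
        driver.get('https://check.torproject.org/')
        # crude check: the page congratulates you when Tor is in use
        return 'Congratulations' in driver.page_source
    finally:
        driver.close()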
# The driver 'gets' the url, attempting to reach the site; returns 'down' if it can't
#return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Manual captcha solver: waits for a specific element so that the whole page loads,
# finds the input boxes and fills in the credentials,
# then allows for manual solving of the captcha in the terminal
#@param: current selenium web driver
def login(driver):
    # wait for login page
    login_link = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[3]/div/main/div/div/div/div[2]/div/div/div/section[1]/input[1]')
    login_link.click()  # open tab with url

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('ChipotleSteakBurrito')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="password"]')
    # Password here
    passwordBox.send_keys('BlackBeans')

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for listing page to show up (this XPath may need to change based on different seed urls)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[2]/form/nav/nav/ul/li[2]/div/a/span[1]')))
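

# The login flow above leans on long absolute XPaths, which break easily when
# the page markup shifts. A hedged alternative sketch: wait on the stable
# id-based locator before typing. The helper below is illustrative only and is
# not wired into the crawler; the 'username' id comes from login() above.
def waitForLoginForm(driver, timeout=50):
    WebDriverWait(driver, timeout).until(
        EC.visibility_of_element_located((By.XPATH, '//*[@id="username"]')))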
# Saves the crawled html page, making the directory path for html pages if not already made
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import CURRENT_DATE

    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Description\\' + fileName + '.html'
    else:
        fullPath = r'..\BlackPyramid\HTML_Pages\\' + CURRENT_DATE + r'\\Listing\\' + fileName + '.html'
    return fullPath
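

# A portability sketch: the Windows-style path literals above could instead be
# assembled with os.path.join, which also runs on POSIX. The helper name is an
# assumption for illustration; it mirrors getFullPathName() and is not called
# anywhere in the crawler.
def getFullPathNamePortable(url):
    from MarketPlaces.Initialization.markets_mining import CURRENT_DATE
    subdir = 'Description' if isDescriptionLink(url) else 'Listing'
    return os.path.join('..', 'BlackPyramid', 'HTML_Pages', CURRENT_DATE,
                        subdir, getNameFromURL(url) + '.html')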
# Creates the file name from the passed URL, falling back to a distinct counter-based
# name when nothing usable is left after cleaning
#@param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
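
# Example behavior of getNameFromURL(), for reference (hypothetical URL):
#   'http://example.onion/products/123' -> 'httpexampleonionproducts123'
# A URL with no alphanumeric characters falls back to the incrementing counter.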
# Returns the list of urls of interest; the crawler runs through this list.
# In this example the entries correspond to product categories, such as
# Guides and Tutorials, Digital Products, and Software and Malware.
def getInterestedLinks():
    links = []

    # Hacking Guides
    links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # Exploits
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # botnets/malware
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # fraud software
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # Other Tools
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')
    # # Services
    # links.append('http://blackpyoc3gbnrlvxqvvytd3kxqj7pd226i2gvfyhysj24ne2snkmnyd.onion/search/results/')

    return links
# Iterates through the list of links of interest; each link is opened and crawled.
# Both listing and description pages are saved here.
#@param: selenium driver
def crawlForum(driver):
    print("Crawling the BlackPyramid market")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    count = 0
    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                clicker = driver.find_element(by=By.XPATH, value='/html/body/div[2]/form/nav/nav/ul/li[2]/div/a')
                clicker.click()  # open tab with url
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                # 'list' shadowed the built-in in the original; renamed for clarity
                productLinks = productPages(html)
                for item in productLinks:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                    # comment out to crawl every product on the page
                    break

                # comment out to visit more than one listing page per category
                if count == 1:
                    count = 0
                    break

                try:
                    # find_element raises NoSuchElementException itself when the
                    # "next" button is missing, which ends the pagination loop below
                    clicker = driver.find_element(by=By.XPATH, value=
                        '/html/body/center/div[4]/div/div[3]/div[23]/div[2]/input[1]')
                    try:
                        clicker.click()
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling BlackPyramid market done successfully. Press ENTER to continue\n")
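

# A hedged extension point: crawlForum() fetches pages back-to-back, which some
# sites rate-limit. A small randomized pause between driver.get() calls is a
# common courtesy; this helper is a sketch and is not called by the crawler.
def politeDelay(low=1.0, high=3.0):
    import random
    # sleep a random 1-3 seconds between page loads to avoid hammering the site
    time.sleep(random.uniform(low, high))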
# Returns 'True' if the link is a description link
#@param: url of any url crawled
#return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'products' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
#@param: url of any url crawled
#return: true if is a Listing page, false if not
def isListingLink(url):
    if 'search' in url:
        return True
    return False
# Calls the parser to extract the description links from a listing page; the html
# parameter is the page source of a link from the interested-links list
#@param: html of a page from the interested link list, i.e. getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return blackpyramid_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing BlackPyramid .... DONE!")