This crawler is based on the calsyslab project.
__author__ = 'DarkWeb'

'''
CryptBB Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.CryptBB.parser import cryptBB_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/'
# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    # forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(forumName, False)


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    path = open('../../path.txt').readline().strip()
    pro = subprocess.Popen(path)
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
# Login using premade account credentials and solve the login captcha manually
def login(driver):
    # click the login button
    login_link = driver.find_element(
        by=By.XPATH, value='/html/body/div/div[2]/div/table/tbody/tr[2]/td/center/pre/strong/a[1]').\
        get_attribute('href')
    driver.get(login_link)  # open tab with url

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/form/table/tbody/tr[2]/td[2]/input')
    # Username here
    usernameBox.send_keys('holyre')  # sends string to the username box
    passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/form/table/tbody/tr[3]/td[2]/input')
    # Password here
    passwordBox.send_keys('PlatinumBorn2')  # sends string to the password box

    '''
    # wait for the captcha page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div[2]/div/form/div/input")))

    # save the captcha locally
    driver.find_element(by=By.XPATH, value='//*[@id="captcha_img"]').screenshot(r'..\CryptBB\captcha.png')

    # this shows the image in any image viewer
    im = Image.open(r'..\CryptBB\captcha.png')
    im.show()

    # wait until the input box shows up
    inputBox = driver.find_element(by=By.XPATH, value='//*[@id="imagestring"]')

    # ask the user to type the captcha solution in the terminal
    userIn = input("Enter solution: ")

    # send the user's solution into the input box
    inputBox.send_keys(userIn)

    # click the verify (submit) button
    driver.find_element(by=By.XPATH, value="/html/body/div/div[2]/div/form/div/input").click()
    '''
    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed url)
    # wait up to 50 sec until id = tab_content is found, then continue
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="tab_content"]')))
# Returns the name of the website
def getForumName():
    name = 'CryptBB'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/'
    return url
# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close tab
    time.sleep(3)
    return
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    # path.txt, as used here: line 1 = Tor Browser firefox binary,
    # line 2 = Firefox profile directory, line 3 = geckodriver executable
    file = open('../../path.txt', 'r')
    lines = file.readlines()

    ff_binary = FirefoxBinary(lines[0].strip())

    ff_prof = FirefoxProfile(lines[1].strip())
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(lines[2].strip())

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
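# Note: port 9150 assumes the SOCKS proxy exposed by the Tor Browser launched in
# opentor(); a standalone tor daemon usually listens on 9050 instead, so this
# value may need adjusting for other setups.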
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)  # open url in browser
        return driver
    except:
        driver.close()  # close tab
        return 'down'
# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = r'..\\CryptBB\\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html'
    else:
        fullPath = r'..\\CryptBB\\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html'
    return fullPath
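# Illustrative example: a listing page crawled on May 1st, 2023 is written to a
# date-stamped folder such as ..\CryptBB\HTML_Pages\05012023\Listing\<fileName>.html,
# while topic (description) pages go under the Description subfolder instead.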
# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
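# Example: for a forumdisplay URL the name is the URL with every non-alphanumeric
# character stripped (e.g. '...forumdisplayphpfid86'); if nothing remains, the
# global counter value is used as the file name instead.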
def getInterestedLinks():
    links = []

    # Beginner Programming
    links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=86')
    # # Beginner Carding and Fraud
    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=91')
    # # Beginner Hacking
    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=87')
    # # Newbie
    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=84')
    # # Beginner Hardware
    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=89')
    # # Training Challenges
    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=96')
    # # Darknet Discussions
    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=88')
    # # Public Leaks and Warez
    # links.append('http://cryptbbtg65gibadeeo2awe3j7s6evg7eklserehqr4w4e2bis5tebid.onion/forumdisplay.php?fid=97')
    # # Hacked Accounts and Database Dumps
    # links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=30')
    # # Android Moded pak
    # links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=53')

    return links
def crawlForum(driver):
    # Crawl each interesting board: save its listing pages, follow every topic
    # (including the topic's own "next" pages), then advance to the board's next
    # listing page until none is left.
    print("Crawling the CryptBB forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)  # open the listing page
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True

            # loop through the listing pages
            while has_next_page:
                topics = topicPages(html)  # topic links on the current listing page
                for item in topics:
                    # variable to check if there is a next page for the topic
                    has_next_topic_page = True
                    counter = 1

                    # check if there is a next page for the topic
                    while has_next_topic_page:
                        # try to access the next page of the topic
                        itemURL = urlparse.urljoin(baseURL, str(item))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver.page_source, item)

                        # if there is a next page in the topic, go there and save it too
                        try:
                            temp = driver.find_element(By.XPATH, '/html/body/div/div[2]/div/div[2]/div')
                            item = temp.find_element(by=By.CLASS_NAME, value='pagination_next').get_attribute('href')

                            if item == "":
                                raise NoSuchElementException
                            else:
                                counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # return from the topic's pages to the listing page
                    # (separate loop variable so the outer index i is not clobbered)
                    for j in range(counter):
                        driver.back()

                    # comment out
                    # break

                    # comment out
                    # if count == 1:
                    #     count = 0
                    #     break

                # next listing page (change depending on web page)
                try:
                    temp = driver.find_element(by=By.XPATH, value='/html/body/div/div[2]/div/div[2]/div')
                    link = temp.find_element(by=By.CLASS_NAME, value='pagination_next').get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling CryptBB forum done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a Topic link, may need to change for every website
def isDescriptionLink(url):
    if 'thread' in url:
        return True
    return False


# Returns True if the link is a listingPage link, may need to change for every website
def isListingLink(url):
    if 'forum' in url:
        return True
    return False


# calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return cryptBB_links_parser(soup)
def crawler():
    startCrawling()
    # print("Crawling and Parsing CryptBB .... DONE!")
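# Optional entry-point sketch, assuming the module may also be run directly;
# normally crawler() is expected to be invoked from the surrounding project.
if __name__ == '__main__':
    crawler()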