This is based on the calsyslab project.
__author__ = 'DarkWeb'

'''
Dread Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.Dread.parser import dread_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/'
# Opens Tor Browser, crawls the website
def startCrawling():
    # opentor()
    # forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        login(driver)
        crawlForum(driver)
        closetor(driver)

    # new_parse(forumName, False)


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    path = open('../../path.txt').readline().strip()
    pro = subprocess.Popen(path)
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
# Login using premade account credentials and do the login captcha manually
def login(driver):
    '''
    # captcha-solving code; for now it runs too slowly, so the captcha expires
    # before it finishes (also requires: from PIL import Image)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.CSS_SELECTOR, ".image")))

    inputBoxes = driver.find_elements(by=By.TAG_NAME, value='input')
    for index, inputBox in enumerate(inputBoxes):
        driver.find_element(by=By.CSS_SELECTOR, value='.image').screenshot(r'..\Dread\captcha.png')
        im = Image.open(r'..\Dread\captcha.png')
        im.show()
        userIn = input("Enter character: ")
        inputBox.send_keys(userIn)
        im.close()
        if index != 5:
            inputBoxes[index + 1].click()

    driver.find_element(by=By.XPATH, value="/html/body/div/div[2]/div/form/div/input").click()
    '''

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the landing page to load once the captcha has been solved
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div[2]")))
# Returns the name of the website
def getForumName():
    name = 'Dread'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/'
    return url


# Closes Tor Browser
def closetor(driver):
    global pid
    # os.system("taskkill /pid " + str(pro.pid))
    os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    with open('../../path.txt', 'r') as file:
        lines = file.readlines()

    ff_binary = FirefoxBinary(lines[0].strip())

    ff_prof = FirefoxProfile(lines[1].strip())
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.set_preference("xpinstall.signatures.required", False)
    ff_prof.update_preferences()

    service = Service(lines[2].strip())

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
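
# NOTE: the layout of ../../path.txt is inferred from opentor() and createFFDriver()
# above; one absolute path per line. The example paths are hypothetical placeholders:
#
#   line 1: Tor Browser executable launched by opentor()      e.g. C:\Tor Browser\Browser\firefox.exe
#   line 2: Firefox/Tor Browser profile directory (FirefoxProfile)
#   line 3: geckodriver executable passed to Service()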
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    # pages are grouped under an MMDDYYYY folder for the current crawl date
    dateStr = "%02d%02d%04d" % (date.today().month, date.today().day, date.today().year)
    subdir = 'Description' if isDescriptionLink(url) else 'Listing'
    return os.path.join(r'..\Dread\HTML_Pages', dateStr, subdir, fileName + '.html')


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
def getInterestedLinks():
    links = []

    # OpSec
    # links.append('http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/d/OpSec?p=40')
    # links.append('http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/d/OpSec')
    # Hacking 180
    links.append('http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/d/hacking')
    # Jobs4Crypto
    links.append('http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/d/Jobs4Crypto')
    # Hacktown
    links.append('http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/d/HackTown')
    # Malware
    # links.append('http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/d/malware')
    # Programming
    links.append('http://dreadytofatroptsdj6io7l3xptbet6onoyno2yv7jicoxknyazubrad.onion/d/programming')

    return links
def crawlForum(driver):
    print("Crawling the Dread forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except Exception:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                topics = topicPages(html)
                for topic in topics:
                    itemURL = urlparse.urljoin(baseURL, str(topic))
                    try:
                        driver.get(itemURL)
                    except Exception:
                        driver.refresh()
                    savePage(driver.page_source, topic)
                    driver.back()
                    break  # note: only the first topic of each listing page is visited

                if count == 1:  # note: stops after the second listing page of each board
                    count = 0
                    break

                try:
                    temp = driver.find_element(by=By.CLASS_NAME, value="pagination")
                    link = temp.find_element(by=By.CLASS_NAME, value="next").get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except Exception:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling the Dread forum done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a Topic link
def isDescriptionLink(url):
    if '/post/' in url:
        return True
    return False


# Returns True if the link is a listing page link
def isListingLink(url):
    if '/d/' in url:
        return True
    return False


# calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return dread_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Dread .... DONE!")