This is based on the calsyslab project.
__author__ = 'DarkWeb'

'''
Endchan Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Endchan.parser import endchan_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion/'
# Opens Tor Browser, crawls the website
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(forumName, baseURL, True)
# Login using premade account credentials and complete the login CAPTCHA manually
def login(driver):
    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change for a different seed URL):
    # wait up to 50 seconds until the element with id="tab_content" is found, then continue
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="tab_content"]')))
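# Note: WebDriverWait raises selenium.common.exceptions.TimeoutException if the
# element with id="tab_content" never becomes visible within the 50-second window.
# A sketch of handling that case explicitly (not part of the original flow):
#   from selenium.common.exceptions import TimeoutException
#   try:
#       WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
#           (By.XPATH, '//*[@id="tab_content"]')))
#   except TimeoutException:
#       print('Listing page did not load in time:', driver.current_url)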
# Returns the name of the website
def getForumName() -> str:
    name = 'Endchan'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion/'
    return url
# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close the current tab
    time.sleep(3)
    return
# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
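# Note: port 9150 is the SOCKS port opened by a running Tor Browser; the
# standalone tor daemon defaults to 9050. If crawling through the daemon
# instead of the browser, only the port preference would need to change, e.g.:
#   ff_prof.set_preference('network.proxy.socks_port', 9050)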
# Opens the seed URL and returns the driver, or the string 'down' if the site is unreachable
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
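# A minimal retry variant (a sketch, not called by startCrawling): onion services
# often fail transiently, so re-attempting driver.get() a few times before giving
# up can make access more reliable. 'attempts' is an illustrative parameter.
def getAccessWithRetries(attempts=3):
    driver = createFFDriver()
    for _ in range(attempts):
        try:
            driver.get(getFixedURL())
            return driver
        except Exception:
            time.sleep(5)  # brief pause before the next attempt
    driver.close()
    return 'down'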
# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:  # context manager ensures the file handle is closed
        file.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if not isListingLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
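# For illustration (hypothetical values): with shared_folder = '/data' and
# CURRENT_DATE = '2023-10-01', a listing page is saved under
#   /data/Forums/Endchan/HTML_Pages/2023-10-01/Listing/<fileName>.html
# and a topic page under
#   /data/Forums/Endchan/HTML_Pages/2023-10-01/Description/<fileName>.html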
# Creates the file name from the passed URL by keeping only its alphanumeric characters
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
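# Illustrative example (hypothetical URL):
#   getNameFromURL('http://a.onion/tech/res/1.html')  ->  'httpaoniontechres1html'
# The global counter only supplies a fallback name for the edge case of a URL
# containing no alphanumeric characters at all.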
def getInterestedLinks():
    links = []

    # /tech/ - Technology
    links.append('http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion/tech/')
    # /g/ - Technolo/g/y
    links.append('http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion/g/')
    # /os/ - Online Security
    links.append('http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion/os/')
    # /hack/ - Hacking
    links.append('http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion/hack/')

    # candidate boards on http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion:
    # /tech/ + /g/ + /markov/ + /os/ + /agdg/ + /cyber/ + /HTML/ + /2600/
    return links
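# Any of the other boards noted above could be appended the same way, e.g. (sketch):
#   links.append('http://enxx3byspwsdo446jujc52ucy2pf5urdbhqw3kbsfhlfjwmbpj5smdad.onion/markov/')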
def crawlForum(driver):
    print("Crawling the Endchan forum")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()

                        if isListingLink(driver.current_url):
                            break

                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # # comment out
                        # if counter == 2:
                        #     break

                        try:
                            page = driver.find_element(By.ID, value='linkNext').get_attribute('href')
                            if page == "":
                                raise NoSuchElementException
                            counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # making sure we go back to the listing page (browser back button simulation)
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()

                # # comment out
                # break

                # # comment out
                # if count == 1:
                #     break

                try:
                    link = driver.find_element(By.ID, value='linkNext').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Endchan forum done.")
# Returns 'True' if the link is a topic link; may need to change for every website
def isDescriptionLink(url):
    if '/res/' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
def isListingLink(url):
    if re.match(".*onion/.*/", url) and '/res/' not in url:
        return True
    return False
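# Illustrative behaviour (hypothetical URLs):
#   isListingLink('http://x.onion/tech/')                 ->  True
#   isListingLink('http://x.onion/tech/res/1.html')       ->  False  (thread page)
#   isDescriptionLink('http://x.onion/tech/res/1.html')   ->  True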
# calling the parser to extract the topic links from the listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return endchan_links_parser(soup)


def crawler():
    startCrawling()