__author__ = 'Helium'

'''
AbyssForum Crawler (Selenium)
Based on the calsyslab project.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import configparser
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.AbyssForum.parser import abyssForum_links_parser
from Forums.Utilities.utilities import cleanHTML

config = configparser.ConfigParser()
config.read('../../setup.ini')
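
# The [TOR] section of setup.ini is expected to provide at least the three
# paths read here and in createFFDriver() below (a sketch; actual values
# depend on the local Tor Browser install):
#
#   [TOR]
#   firefox_binary_path  = <path to the Tor Browser firefox binary>
#   firefox_profile_path = <path to the Tor Browser profile directory>
#   geckodriver_path     = <path to the geckodriver executable>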

counter = 1
baseURL = 'http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/'


# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    # forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(forumName, baseURL, False)


# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
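
# NOTE: opentor() starts the Tor Browser binary directly and records its pid;
# the matching kill logic in closetor() is commented out, so the window opened
# here has to be closed manually.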


# No credentials or captcha are submitted on this forum; the function only
# waits for the seed page to finish rendering
def login(driver):
    # wait for the listing page to show up (this XPath may need to change based on the seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="sn-category-3"]')))


# Returns the name of the website
def getForumName():
    name = 'AbyssForum'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/'
    return url


# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates a Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))
    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
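
# A minimal smoke test for the driver factory (a sketch; assumes the Tor
# SOCKS proxy is already listening on 127.0.0.1:9150 as configured above):
#
#   driver = createFFDriver()
#   driver.get('https://check.torproject.org')
#   print(driver.title)
#   driver.quit()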


def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        # ..\AbyssForum\HTML_Pages\<MMDDYYYY>\Description\
        fullPath = r'..\AbyssForum\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html'
    else:
        # ..\AbyssForum\HTML_Pages\<MMDDYYYY>\Listing\
        fullPath = r'..\AbyssForum\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html'
    return fullPath
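
# Example of the resulting layout (hypothetical date of June 5th, 2023;
# Windows tolerates the doubled backslashes the raw strings produce):
#   ..\AbyssForum\HTML_Pages\06052023\Description\<name>.html
#   ..\AbyssForum\HTML_Pages\06052023\Listing\<name>.html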


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
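
# For example, 'http://example.onion/viewtopic.php?t=5' (a hypothetical URL)
# becomes 'httpexampleonionviewtopicphpt5'; URLs with no alphanumeric
# characters fall back to the global counter.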


def getInterestedLinks():
    links = []

    # Hacked Database
    links.append('http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/viewforum.php?f=26&sid=6f7add746810784861a7ec31703a3757')
    # Hire a Hacker
    # links.append('http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/viewforum.php?f=27&sid=6f7add746810784861a7ec31703a3757')
    # Hacking Tools
    # links.append('http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/viewforum.php?f=28&sid=6f7add746810784861a7ec31703a3757')
    # Carding Forums
    # links.append('http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/viewforum.php?f=30&sid=6f7add746810784861a7ec31703a3757')
    # Social Media Hacking
    # links.append('http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/viewforum.php?f=32&sid=6f7add746810784861a7ec31703a3757')
    # Hacking Tutorials
    # links.append('http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/viewforum.php?f=12&sid=6f7add746810784861a7ec31703a3757')
    # Cracking Tutorials
    # links.append('http://qyvjopwdgjq52ehsx6paonv2ophy3p4ivfkul4svcaw6qxlzsaboyjid.onion/viewforum.php?f=13&sid=6f7add746810784861a7ec31703a3757')

    return links
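
# NOTE: the sid parameter in these URLs appears to be a phpBB session id tied
# to the session that originally captured them (an assumption); if the forum
# rejects stale ids, the links may need to be re-collected without the sid.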


def crawlForum(driver):
    print("Crawling the AbyssForum forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0

    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                topics = topicPages(html)
                for item in topics:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                    '''
                    # variable to check if there is a next page for the topic
                    has_next_topic_page = True
                    counter = 1

                    # check if there is a next page for the topics
                    while has_next_topic_page:
                        # try to access the next page of the topic
                        itemURL = urlparse.urljoin(baseURL, str(item))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver.page_source, item)

                        # if there is a next page then go and save it
                        try:
                            temp = driver.find_element(By.XPATH, '/html/body/div/div[2]/div/div[2]/div')
                            item = temp.find_element(by=By.CLASS_NAME, value='pagination_next').get_attribute('href')

                            if item == "":
                                raise NoSuchElementException
                            else:
                                counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # end of loop: step back out of the topic's pages
                    for i in range(counter):
                        driver.back()
                    '''

                    # comment out: only the first topic of each listing page is saved
                    break

                # comment out: stop after two listing pages per link
                if count == 1:
                    count = 0
                    break

                try:
                    # the next-page link of the listing (brittle absolute XPath)
                    link = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div[2]/div[2]/div[2]/ul/li[9]/a').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling the AbyssForum forum done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a Topic link
def isDescriptionLink(url):
    if 'viewtopic' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link
def isListingLink(url):
    if 'viewforum' in url:
        return True
    return False


# calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return abyssForum_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Abyss .... DONE!")