This is based on the calsyslab project.

__author__ = 'Helium'

'''
Altenens Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image

import urllib.parse as urlparse
import os, re, time
from datetime import date
import configparser
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.Altenens.parser import altenens_links_parser
from Forums.Utilities.utilities import cleanHTML

config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1
baseURL = 'https://altenens.is/'

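# The [TOR] section of setup.ini is expected to provide the three paths read below
# (illustrative sketch only; the values are placeholders to adjust to the local
# Tor Browser / geckodriver installation):
#
#   [TOR]
#   firefox_binary_path = C:\Tor Browser\Browser\firefox.exe
#   firefox_profile_path = C:\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default
#   geckodriver_path = C:\geckodriver\geckodriver.exe
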
# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    # forumName = getForumName()

    driver = getAccess()
    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(forumName, False)

# Opens Tor Browser
def opentor():
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return

# Login using premade account credentials and do the login captcha manually
def login(driver):
    # click the login button
    login_link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[1]/div/div/div/div[1]/a[1]').\
        get_attribute('href')
    driver.get(login_link)
    # login.click()

    # # entering username and password into input boxes
    # usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[1]/dd')
    # # Username here
    # usernameBox.send_keys('mylittlepony45')  # sends string to the username box
    # passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[2]/dd/div/div')
    # # Password here
    # passwordBox.send_keys('johnnyTest@18')  # sends string to the password box

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed URL)
    # wait up to 50 sec until the element below is visible, then continue
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[4]/div/div/div[1]/div/div[1]/div[2]/ol/li[1]')))

# Returns the name of the website
def getForumName():
    name = 'Altenens'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'https://altenens.is/'
    return url

# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close tab
    time.sleep(3)
    return

# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver

# Opens the forum URL in the browser; returns the driver, or 'down' if the site could not be reached
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)  # open url in browser
        return driver
    except:
        driver.close()  # close tab
        return 'down'

# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = r'..\\Altenens\\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html'
    else:
        fullPath = r'..\\Altenens\\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html'
    return fullPath

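# Illustrative output, assuming a crawl run on 06/05/2023: a topic URL such as
# https://altenens.is/threads/example.1/ would be saved to
# ..\\Altenens\\HTML_Pages\\06052023\\Description\\httpsaltenensisthreadsexample1.html,
# while listing ('forums') pages go under the Listing folder of the same dated directory.
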
# Creates the file name from passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name

def getInterestedLinks():
    links = []

    # Hacking Tools
    links.append('https://altenens.is/forums/hacking-tools.469165/')
    # # hash cracking
    # links.append('https://altenens.is/forums/hash-cracking.469167/')
    # # phishing and spamming
    # links.append('https://altenens.is/forums/phishing-and-spamming.469223/')
    # # pentesting
    # links.append('https://altenens.is/forums/pentesting.469169/')
    # # cracking tools
    # links.append('https://altenens.is/forums/cracking-tools.469204/')
    # # cracking tutorials / other methods
    # links.append('https://altenens.is/forums/cracking-tutorials-other-methods.469205/')

    return links

def crawlForum(driver):
    print("Crawling the Altenens forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)  # open
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            # loop through the topics
            while has_next_page:
                topics = topicPages(html)  # for multiple pages
                for item in topics:
                    # variable to check if there is a next page for the topic
                    has_next_topic_page = True
                    counter = 1

                    # check if there is a next page for the topics
                    while has_next_topic_page:
                        # try to access the next page of the topic
                        itemURL = urlparse.urljoin(baseURL, str(item))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver.page_source, item)

                        # if there is a next page then go and save....
                        # next page in the topic?
                        try:
                            item = driver.find_element(By.XPATH, '/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[1]/div[1]/div[1]/nav/div[1]/a').get_attribute('href')

                            if item == "":
                                raise NoSuchElementException
                                has_next_topic_page = False
                            else:
                                counter += 1

                        except NoSuchElementException:
                            has_next_topic_page = False

                    # end of loop: go back through the topic pages we visited
                    for j in range(counter):
                        driver.back()

                    # comment out - limits the crawl to the first topic for testing
                    break

                # comment out - limits the crawl to two listing pages for testing
                if count == 1:
                    count = 0
                    break

                try:  # change depending on web page, #next page
                    link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/div[1]/div[1]/nav/div[1]/a').get_attribute('href')

                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print finalTime - initialTime

    input("Crawling Altenens forum done successfully. Press ENTER to continue\n")

# Returns 'True' if the link is a Topic link, may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False


# Returns True if the link is a listingPage link, may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False

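# For example, a thread URL (containing 'threads', e.g. https://altenens.is/threads/<title>.<id>/,
# the exact pattern being assumed here) is treated as a description page, while a board URL
# (containing 'forums', e.g. https://altenens.is/forums/hacking-tools.469165/) is a listing page.
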
# Calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return altenens_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Altenens .... DONE!")
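

# Minimal entry point, assuming this module is meant to be run directly;
# in the full project the crawler may instead be launched from the Initialization scripts.
if __name__ == '__main__':
    crawler()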