This crawler is based on the CALSysLab project.

__author__ = 'Helium'

'''
Altenens Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import configparser
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Altenens.parser import altenens_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'https://altenens.is/'

# Opens Tor Browser, crawls the website
def startCrawling():
    opentor()
    # forumName = getForumName()

    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    # new_parse(forumName, False)

# Opens Tor Browser
def opentor():
    from Forums.Initialization.forums_mining import config
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
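
# Note: the Tor Browser process id is kept in the module-level `pid` so that
# closetor() could kill the process; that path is currently commented out and
# the browser is closed through the driver instead.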

# Login using premade account credentials; the login captcha is solved manually
def login(driver):
    # click the login button
    login = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[1]/div/div/div/div[1]/a[1]').\
        get_attribute('href')
    driver.get(login)
    # login.click()

    # # enter username and password into the input boxes
    # usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[1]/dd')
    # # username here
    # usernameBox.send_keys('mylittlepony45')  # sends string to the username box
    # passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[2]/dd/div/div')
    # # password here
    # passwordBox.send_keys('johnnyTest@18')  # sends string to the password box

    input("Press ENTER when CAPTCHA is completed\n")

    # wait up to 50 seconds for the listing page to show up, then continue
    # (this XPath may need to change based on the seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[4]/div/div/div[1]/div/div[1]/div[2]/ol/li[1]')))

# Returns the name of the website
def getForumName():
    name = 'Altenens'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'https://altenens.is/'
    return url


# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close the current tab
    time.sleep(3)
    return

# Creates the Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)           # manual proxy configuration
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)  # Tor Browser's default SOCKS port
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
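
# The 'TOR' config keys used above come from the project's configuration file,
# read in Forums.Initialization.forums_mining. A sketch of what that section
# might look like -- the values below are illustrative placeholders, not the
# project's actual paths:
#
#   [TOR]
#   firefox_binary_path = <path to Tor Browser's firefox executable>
#   firefox_profile_path = <path to the Tor Browser profile directory>
#   geckodriver_path = <path to the geckodriver executable>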

def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)  # open the seed URL in the browser
        return driver
    except:
        driver.close()  # close the tab
        return 'down'
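
# Note that getAccess() signals failure with the string sentinel 'down', so
# callers compare against that string rather than checking for None
# (see startCrawling above).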

# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
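
# e.g. a thread ('Description') page crawled today ends up under
#   <shared_folder>/Forums/Altenens/HTML_Pages/<CURRENT_DATE>/Description/<fileName>.html
# where CURRENT_DATE is supplied by Forums.Initialization.forums_mining.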

# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
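
# e.g. 'https://altenens.is/forums/hacking-tools.469165/' becomes
# 'httpsaltenensisforumshackingtools469165'; a URL with no alphanumeric
# characters falls back to the global counter ('1', '2', ...).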

def getInterestedLinks():
    links = []

    # hacking tools
    links.append('https://altenens.is/forums/hacking-tools.469165/')
    # # hash cracking
    # links.append('https://altenens.is/forums/hash-cracking.469167/')
    # # phishing and spamming
    # links.append('https://altenens.is/forums/phishing-and-spamming.469223/')
    # # pentesting
    # links.append('https://altenens.is/forums/pentesting.469169/')
    # # cracking tools
    # links.append('https://altenens.is/forums/cracking-tools.469204/')
    # # cracking tutorials and other methods
    # links.append('https://altenens.is/forums/cracking-tutorials-other-methods.469205/')

    return links

def crawlForum(driver):
    print("Crawling the Altenens forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)  # open the listing page
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            # loop through the listing pages
            while has_next_page:
                topics = topicPages(html)  # extract the topic links on this listing page
                for item in topics:
                    # flag checked while following a topic across its pages
                    has_next_topic_page = True
                    counter = 1

                    # follow every page of the current topic
                    while has_next_topic_page:
                        # try to access the next page of the topic
                        itemURL = urlparse.urljoin(baseURL, str(item))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver.page_source, item)

                        # if there is a next page in the topic, go there and save it too
                        try:
                            item = driver.find_element(By.XPATH, '/html/body/div[1]/div[4]/div/div/div[3]/div/div/div[1]/div[1]/div[1]/nav/div[1]/a').get_attribute('href')
                            if item == "":
                                raise NoSuchElementException
                            else:
                                counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # step back through every topic page we visited
                    for j in range(counter):
                        driver.back()

                    # comment this break out to crawl every topic on the listing page
                    break

                # comment this block out to crawl more than one extra listing page
                if count == 1:
                    count = 0
                    break

                try:  # next listing page; the XPath may change depending on the web page
                    link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/div[1]/div[1]/nav/div[1]/a').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling the Altenens forum done successfully. Press ENTER to continue\n")

# Returns 'True' if the link is a topic link; may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False
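
# e.g. (the thread URL below is an illustrative placeholder):
#   isDescriptionLink('https://altenens.is/threads/some-topic.123/')      -> True
#   isListingLink('https://altenens.is/forums/hacking-tools.469165/')     -> True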

# calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return altenens_links_parser(soup)

def crawler():
    startCrawling()
    # print("Crawling and Parsing Altenens .... DONE!")