__author__ = 'Helium'

'''
Altenens Forum Crawler (Selenium)
Based on the calsyslab project.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import configparser
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.Altenens.parser import altenens_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'https://altenens.is/'

# Opens Tor Browser, crawls the website
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(forumName, baseURL, True)

# Login using premade account credentials and do the login captcha manually
def login(driver):
    # find the login link on the landing page and open it
    login_link = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[1]/div/div/div/div[1]/a[1]').get_attribute('href')
    driver.get(login_link)  # open tab with url

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[1]/dd/input')
    # Username here
    usernameBox.send_keys('mylittlepony45')  # sends string to the username box
    passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[4]/div/div/div[3]/div/div/div/form/div[1]/div/dl[2]/dd/div/div/input')
    # Password here
    passwordBox.send_keys('johnnyTest@18')  # sends string to the password box

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change based on the seed url)
    # wait up to 50 sec until the element is located, then continue
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div[1]/div/div/div/div[1]/a[1]')))

# Returns the name of the website
def getForumName():
    name = 'Altenens'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'https://altenens.is/'
    return url

# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close tab
    time.sleep(3)
    return

# Creates FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
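    # Route all traffic through the local Tor SOCKS proxy; 9150 is the default
    # SOCKS port exposed by the Tor Browser bundle.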
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver

def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)  # open url in browser
        return driver
    except:
        driver.close()  # close tab
        return 'down'

# Saves the crawled html page
def savePage(driver, html, url):
    cleanPage = cleanHTML(driver, html)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return
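
# Saved pages are grouped by crawl date, with listing (forum index) pages stored
# under 'Listing' and topic pages stored under 'Description'.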

# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE
    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath

# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name

def getInterestedLinks():
    links = []

    # Hacking
    links.append('https://altenens.is/forums/hacking.469162/')
    # Hacking showoff
    links.append('https://altenens.is/forums/hacking-showoff.469232/')
    # Remote administration
    links.append('https://altenens.is/forums/remote-administration.469161/')
    # Cracking tools
    links.append('https://altenens.is/forums/cracking-tools.469204/')
    # Cracking tutorials
    links.append('https://altenens.is/forums/cracking-tutorials-other-methods.469205/')
    # Combo lists and configs
    links.append('https://altenens.is/forums/combolists-and-configs.469206/')
    # Programming
    links.append('https://altenens.is/forums/programming.469239/')

    return links
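
# The crawl loop below saves every listing page it visits, opens each topic found
# on that listing, walks the topic's 'Next' pagination, backs out to the listing,
# and then follows the listing's own 'Next' link until no further pages are found.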

# newest version of crawling
def crawlForum(driver):
    print("Crawling the Altenens forum")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important: each topic page gets its own file

                        # debug limit: save only the first two pages of each topic (comment out for a full crawl)
                        if counter == 2:
                            break

                        try:
                            page = driver.find_element(By.LINK_TEXT, value='Next').get_attribute('href')
                            if page == "":
                                raise NoSuchElementException
                            counter += 1

                        except NoSuchElementException:
                            has_next_topic_page = False

                    # step back through the visited topic pages to return to the listing
                    for j in range(counter):
                        driver.back()

                    # debug limit: only crawl the first topic of each listing (comment out for a full crawl)
                    break

                # debug limit: only crawl two pages of each listing (comment out for a full crawl)
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Altenens forum done.")

# Returns 'True' if the link is a Topic link, may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False


# Returns True if the link is a listingPage link, may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False

# calls the links parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr',{"class": "inline_row"}).find('strong').text)
    return altenens_links_parser(soup)

def crawler():
    startCrawling()
    # print("Crawling and Parsing Altenens .... DONE!")
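
# Optional standalone entry point (an assumption added for convenience; in the
# project this module is normally imported and crawler() is invoked by the
# Initialization scripts rather than run directly).
if __name__ == '__main__':
    crawler()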