__author__ = 'DarkWeb'

'''
Libre Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Libre.parser import libre_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://libreeunomyly6ot7kspglmbd5cvlkogib6rozy43r2glatc6rmwauqd.onion/'

# Opens Tor Browser and crawls the website
def startCrawling():
    opentor()
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(forumName, baseURL, False)

# Opens Tor Browser
def opentor():
    from Forums.Initialization.forums_mining import config
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return

# Logs in using pre-made account credentials; the login CAPTCHA is solved manually
def login(driver):
    input('Press ENTER when the CAPTCHA is completed and you are at the login page')

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.NAME, value='login')
    # username here
    usernameBox.send_keys('ct1234')    # sends string to the username box
    passwordBox = driver.find_element(by=By.NAME, value='password')
    # password here
    passwordBox.send_keys('r5o0wqmw')  # sends string to the password box

    login = driver.find_element(by=By.CLASS_NAME, value='block-container')
    login_link = login.find_element(by=By.TAG_NAME, value='button')
    login_link.click()

    # wait up to 50 seconds until the navigation bar is visible, then continue
    # (this locator may need to change for a different seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.TAG_NAME, 'nav')))

    # click the link to the correct forum board
    login_link = driver.find_element(by=By.XPATH, value='/html/body/nav/div[1]/a[3]').get_attribute('href')
    driver.get(login_link)  # open the board URL

    # wait up to 50 seconds until the listing page shows up, then continue
    # (this XPath may need to change for a different seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div[3]/div[5]')))

# Returns the name of the website
def getForumName() -> str:
    name = 'Libre'
    return name

# Returns the base URL of the website
def getFixedURL():
    url = 'http://libreeunomyly6ot7kspglmbd5cvlkogib6rozy43r2glatc6rmwauqd.onion/'
    return url

# Closes Tor Browser
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close the current tab
    time.sleep(3)
    return

# Creates a Firefox 'driver' and configures its profile
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
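
# note: 9150 is the SOCKS port exposed by the Tor Browser bundle launched by
# opentor(); a standalone tor daemon normally listens on 9050 instead, so only
# the socks_port preference above should need to change in that setup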
# Opens the seed URL with the Tor-configured driver; returns 'down' on failure
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'

# Saves the crawled html page
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
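
# illustration with hypothetical values: with shared_folder '/data' and
# CURRENT_DATE '2023-08-01', a topic page would be saved as
# /data/Forums/Libre/HTML_Pages/2023-08-01/Description/<fileName>.html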
# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
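
# example (hypothetical URL): getNameFromURL('http://example.onion/p/123')
# returns 'httpexampleonionp123'; the global counter is only used as a
# fallback name when the URL contains no alphanumeric characters at all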
def getInterestedLinks():
    links = []

    # cyber security
    links.append('http://libreeunomyly6ot7kspglmbd5cvlkogib6rozy43r2glatc6rmwauqd.onion/c/CyberSecurity')
    # services
    # links.append('http://libreeunomyly6ot7kspglmbd5cvlkogib6rozy43r2glatc6rmwauqd.onion/c/Services')
    # programming
    # links.append('http://libreeunomyly6ot7kspglmbd5cvlkogib6rozy43r2glatc6rmwauqd.onion/c/Programming')

    return links

def crawlForum(driver):
    print("Crawling the Libre forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    count = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                topics = topicPages(html)  # parse the listing page for topic links
                for item in topics:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                    # variable to check if there is a next page for the topic
                    # has_next_topic_page = True
                    # counter = 1

                    # # check if there is a next page for the topic
                    # while has_next_topic_page:
                    #     # try to access the next page of the topic
                    #     itemURL = urlparse.urljoin(baseURL, str(item))
                    #     try:
                    #         driver.get(itemURL)
                    #     except:
                    #         driver.refresh()
                    #     savePage(driver.page_source, item)
                    #
                    #     # if there is a next page then go and save it
                    #     try:
                    #         # temp = driver.find_element(By.XPATH, '/html/body/div[2]/div[4]/div/div[5]/div[2]/div/div[1]/div[1]/div/nav/div[1]')
                    #         item = driver.find_element(by=By.LINK_TEXT, value='>').get_attribute('href')
                    #
                    #         if item == "":
                    #             raise NoSuchElementException
                    #         else:
                    #             counter += 1
                    #
                    #     except NoSuchElementException:
                    #         has_next_topic_page = False
                    #
                    # # end of loop: step back to the listing page
                    # for i in range(counter):
                    #     driver.back()

                    # comment out the break to crawl every topic on the listing page
                    break

                # comment out the block below to crawl more than two listing pages
                if count == 1:
                    count = 0
                    break

                try:
                    # temp = driver.find_element(by=By.XPATH, value='/html/body/div[2]/div[4]/div/div[5]/div[2]/div/div/div[1]/div/nav/div[1]')
                    link = driver.find_element(by=By.LINK_TEXT, value='>').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)
                    count += 1
                except NoSuchElementException:
                    has_next_page = False
        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling the Libre forum done successfully. Press ENTER to continue\n")

# Returns True if the link is a topic (description) link; may need to change for every website
def isDescriptionLink(url):
    if '/p/' in url:
        return True
    return False

# Returns True if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if '/c/' in url:
        return True
    return False

# calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return libre_links_parser(soup)

def crawler():
    startCrawling()
    # print("Crawling and Parsing Libre .... DONE!")
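
# minimal entry point, assuming this module is run directly; the project's
# Initialization scripts may invoke crawler() themselves, so this guard is
# an addition for standalone use
if __name__ == '__main__':
    crawler()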