This is based on the calsyslab project.

__author__ = 'DarkWeb'

'''
Nemesis Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.NemesisForums.parser import nemesisforums_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/communities'
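# 'counter' is used by getNameFromURL() as a fallback file name when a URL contains no
# alphanumeric characters; 'baseURL' is the Nemesis communities landing page and is also
# used to resolve relative topic links in crawlForum()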


# Opens Tor Browser, crawls the website
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    # new_parse(forumName, baseURL, True)


# No credentials are entered for Nemesis; this only waits for the seed listing page to load
def login(driver):
    # wait for the listing page to show up (this XPath may need to change for a different seed URL)
    # wait up to 120 seconds until the page container div is visible, then continue
    WebDriverWait(driver, 120).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div[1]/div')))


# Returns the name of the website
def getForumName() -> str:
    name = 'NemesisForums'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/communities'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close the current tab
    time.sleep(3)
    return


# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
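

# A minimal, untested sketch of an equivalent setup for newer Selenium 4 releases, which
# drop the firefox_binary / firefox_profile keyword arguments used above (assumes the
# same 'TOR' config keys; only a few representative preferences are shown):
def createFFDriverWithOptions():
    from selenium.webdriver.firefox.options import Options
    from Forums.Initialization.forums_mining import config

    opts = Options()
    opts.binary_location = config.get('TOR', 'firefox_binary_path')           # Tor Browser's bundled Firefox
    opts.profile = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))  # reuse the Tor Browser profile
    opts.set_preference('network.proxy.type', 1)                              # manual proxy configuration
    opts.set_preference('network.proxy.socks', '127.0.0.1')
    opts.set_preference('network.proxy.socks_port', 9150)                     # Tor Browser's SOCKS listener
    opts.set_preference('network.proxy.socks_remote_dns', True)               # resolve .onion names through Tor
    # ...the remaining preferences from createFFDriver() can be set the same way...

    service = Service(config.get('TOR', 'geckodriver_path'))
    driver = webdriver.Firefox(service=service, options=opts)
    driver.maximize_window()
    return driver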


def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE
    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if not isListingLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
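# Resulting layout under the configured shared_folder:
#   Forums/NemesisForums/HTML_Pages/<CURRENT_DATE>/Listing/<name>.html      (listing pages)
#   Forums/NemesisForums/HTML_Pages/<CURRENT_DATE>/Description/<name>.html  (topic pages)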


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


def getInterestedLinks():
    links = []

    # Carding
    links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Carding')
    # Hacking
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Hacking')
    # Programming
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Programming')
    # Malware
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Malware')
    # DDoS
    links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/DDoS')
    # Fraud
    # links.append('http://nemesis55gdxo6emcigofp26nmjokadvmvsbnauloweoa47v2aap2ead.onion/n/Fraud')

    return links
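

# crawlForum() walks each seed listing page: it saves the listing, pulls the topic links
# out of it with topicPages(), saves every topic page (following the ">" pagination link
# inside each topic), and then follows the ">" link on the listing itself. The
# "comment out" guards below cap the crawl at two topic pages and two listing pages for
# testing; remove them for a full crawl.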
def crawlForum(driver):
    print("Crawling the Nemesis forum")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()

                        if isListingLink(driver.current_url):
                            break

                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # comment out
                        if counter == 2:
                            break

                        try:
                            page = driver.find_element(by=By.XPATH, value='//a[contains(text(), ">")]').get_attribute('href')
                            if page == "":
                                raise NoSuchElementException
                            counter += 1

                        except NoSuchElementException:
                            has_next_topic_page = False

                    # making sure we go back to the listing page (browser back button simulation)
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()

                    # comment out
                    # break

                # comment out
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.XPATH, value='//a[contains(text(), ">")]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Nemesis forum done.")


# Returns 'True' if the link is a Topic link; may need to change for every website
def isDescriptionLink(url):
    if 'post' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if '.onion/n/' in url:
        return True
    return False


# calls the parser to extract the topic links from a listing page
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return nemesisforums_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing NemesisForums .... DONE!")
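

# In the full project, crawler() is presumably invoked by the Forums.Initialization
# scripts; a minimal standalone entry point (an assumption, not part of the original
# file) would be:
if __name__ == '__main__':
    crawler()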