__author__ = 'Helium'

'''
Procrax Forum Crawler (Selenium)
Based on the calsyslab project.
rechecked and confirmed
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import configparser
import subprocess
from bs4 import BeautifulSoup
from Forums.Initialization.prepare_parser import new_parse
from Forums.Procrax.parser import procrax_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
BASE_URL = 'https://procrax.cx/'
FORUM_NAME = 'Procrax'

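# startCrawling drives the full pipeline: open the site over Tor, log in,
# crawl every seed board, close the browser, then hand the saved HTML to
# new_parse for parsing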
# Opens Tor Browser, crawls the website
def startCrawling():
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(
        forum=FORUM_NAME,
        url=BASE_URL,
        createLog=True
    )

# Login using premade account credentials and do login captcha manually
def login(driver):
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div[1]/form/div/div/div/dl[4]/dd/div/div[2]/button/span')))

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.NAME, value='login')
    # Username here
    usernameBox.send_keys('cheese_pizza_man')  # sends string to the username box
    passwordBox = driver.find_element(by=By.NAME, value='password')
    # Password here
    passwordBox.send_keys('Gr33nSp@m&3ggs')  # sends string to the password box

    clicker = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[3]/div[2]/div[3]/div[2]/div[1]/form/div/div/div/dl[4]/dd/div/div[2]/button/span')
    clicker.click()

    # wait for the listing page to show up (this XPath may need to change based on the seed URL)
    # waits up to 50 seconds until the element is found, then continues
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div[3]/div[2]/div[3]/div[1]/div/div[1]/div')))
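    # the 50-second waits above double as the window for solving the login
    # captcha by hand; the crawl proceeds only once the listing page renders
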
# Returns the name of the website
def getForumName():
    name = 'Procrax'
    return name

# Returns the link of the website
def getFixedURL():
    url = 'https://procrax.cx/'
    return url

# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close tab
    time.sleep(3)
    return

# Creates a FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))
    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
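    # route all traffic through the local Tor SOCKS5 proxy; 127.0.0.1:9150 is
    # the port the Tor Browser bundle listens on, and socks_remote_dns keeps
    # DNS resolution inside the Tor circuit as well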
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver

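# getAccess returns the sentinel string 'down' instead of a driver when the
# seed URL cannot be fetched, which startCrawling checks before logging in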
def getAccess():
    driver = createFFDriver()
    try:
        driver.get(BASE_URL)  # open url in browser
        return driver
    except Exception:
        driver.close()  # close tab
        return 'down'

# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + FORUM_NAME + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath

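# pages land in one folder per crawl date, e.g.
# <shared_folder>/Forums/Procrax/HTML_Pages/<CURRENT_DATE>/Listing/<name>.html
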
# Creates the file name from passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name

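# e.g. getNameFromURL('https://procrax.cx/forums/general-hacking.24/') returns
# 'httpsprocraxcxforumsgeneralhacking24'; a URL with no alphanumeric characters
# falls back to the global counter
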
def getInterestedLinks():
    links = []

    # general hacking
    links.append('https://procrax.cx/forums/general-hacking.24/')
    # hacking security tools
    # links.append('https://procrax.cx/forums/hacking-security-tools.20/')
    # hacktube
    # links.append('https://procrax.cx/forums/hacktube.22/')
    # cardable
    # links.append('https://procrax.cx/forums/cardable-websites.28/')
    # tools
    # links.append('https://procrax.cx/forums/tools-bots-validators.73/')
    # general forum
    # links.append('https://procrax.cx/forums/forum-discussions-updates.7/')

    return links

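# crawlForum walks two levels of pagination: the outer loop follows the 'Next'
# link across each seed board's listing pages, and for every topic found the
# inner loop follows the topic's own 'Next' links, saving each page before
# unwinding with driver.back(); the count/counter checks are crawl limits kept
# in for testing, as the inline notes below explain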
def crawlForum(driver):
    print("Crawling the Procrax forum")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except Exception:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(BASE_URL, str(page))
                        try:
                            driver.get(itemURL)
                        except Exception:
                            driver.refresh()
                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # comment out the break below to crawl every page of the topic
                        if counter == 2:
                            break

                        try:
                            page = driver.find_element(By.LINK_TEXT, value='Next').get_attribute('href')
                            if page == "":
                                raise NoSuchElementException
                            counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    for j in range(counter):
                        driver.back()

                    # uncomment the break below to stop after a single topic
                    # break

                # comment out the break below to crawl every listing page
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Procrax forum done.")

# Returns 'True' if the link is a Topic link, may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False

# Returns True if the link is a listingPage link, may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False

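# (assumption: Procrax appears to use XenForo-style URLs, with topics under
# /threads/ and board listings under /forums/; both checks may need updating
# if the site's URL scheme changes)
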
# calling the parser to define the links
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    # print(soup.find('div', id="container").find('div', id="content").find('table', {"class": "tborder clear"}).find('tbody').find('tr', {"class": "inline_row"}).find('strong').text)
    return procrax_links_parser(soup)

def crawler():
    startCrawling()
    # print("Crawling and Parsing Procrax .... DONE!")
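
# Usage sketch (assumption: this module is normally driven by
# Forums.Initialization.forums_mining, which builds the shared 'config' object
# imported inside the functions above; running the module stand-alone only
# works once that config is importable):
#
# if __name__ == '__main__':
#     crawler()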