This is based on the calsyslab project.

__author__ = 'DarkWeb'

'''
Cardingleaks Forum Crawler (Selenium)
Crawler updated and fixed.

The site sometimes requires you to view a new post each day, so make sure
you log in before crawling.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.Cardingleaks.parser import cardingleaks_links_parser
from Forums.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'https://leaks.ws/'


# Opens Tor Browser, crawls the website
def startCrawling():
    forumName = getForumName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(forumName, baseURL, True)


# Login using premade account credentials; the login captcha is solved manually
def login(driver):
    # click the login button
    login_link = driver.find_element(
        by=By.XPATH, value='/html/body/div[2]/div[1]/nav/div/div[3]/div[1]/a[1]').\
        get_attribute('href')
    driver.get(login_link)  # open the login page

    # enter the username and password into the input boxes
    usernameBox = driver.find_element(by=By.NAME, value='login')
    usernameBox.send_keys('somanyfrogs')
    passwordBox = driver.find_element(by=By.NAME, value='password')
    passwordBox.send_keys('therearewaytoomanyherehowwhy')

    login_block = driver.find_element(by=By.CLASS_NAME, value='block-container')
    login_button = login_block.find_element(by=By.TAG_NAME, value='button')
    login_button.click()

    # input('input')

    # wait up to 50 seconds for the listing page to show up
    # (this class name may need to change for a different seed URL)
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.CLASS_NAME, 'p-body-pageContent')))
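
# A minimal sketch, assuming the site shows a captcha at login: re-enabling the
# input() pause above blocks the run until the captcha is solved by hand in the
# browser (requires an interactive terminal):
#
#     input('Solve the captcha in the browser, then press Enter to continue...')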


# Returns the name of the website
def getForumName() -> str:
    name = 'Cardingleaks'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'https://leaks.ws/'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close the tab
    time.sleep(3)
    return


# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from Forums.Initialization.forums_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver
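
# Note: socks_port 9150 is the Tor Browser bundle's default SOCKS port; a
# standalone tor daemon usually listens on 9050 instead. A quick sanity check
# (illustrative, not part of the original flow):
#
#     driver = createFFDriver()
#     driver.get('https://check.torproject.org')  # reports whether Tor is in use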


def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except Exception:
        driver.close()
        return 'down'
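
# Caller contract (see startCrawling): getAccess() returns either a live
# WebDriver or the sentinel string 'down':
#
#     driver = getAccess()
#     if driver != 'down':
#         ...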


# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:  # 'with' ensures the file handle is closed
        f.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + getForumName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
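
# Resulting on-disk layout (derived from the paths above):
#     <shared_folder>/Forums/Cardingleaks/HTML_Pages/<CURRENT_DATE>/Listing/<name>.html
#     <shared_folder>/Forums/Cardingleaks/HTML_Pages/<CURRENT_DATE>/Description/<name>.html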


# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
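
# e.g. getNameFromURL('https://leaks.ws/forums/carding-methods.82/') returns
# 'httpsleakswsforumscardingmethods82'; a URL with no alphanumeric characters
# falls back to the global counter ('1', '2', ...).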


def getInterestedLinks():
    links = []

    # carding methods
    links.append('https://leaks.ws/forums/carding-methods.82/')
    # carding school
    links.append('https://leaks.ws/forums/help-desk-carding-school.35/')
    # carding discussion
    links.append('https://leaks.ws/forums/carding-discussion-desk.58/')
    # carding tutorials
    links.append('https://leaks.ws/forums/carding-tutorials.13/')
    # carding tools and software
    links.append('https://leaks.ws/forums/carding-tools-softwares.10/')
    # exploits and cracking tools
    links.append('https://leaks.ws/forums/exploits-cracking-tools.22/')

    return links


def crawlForum(driver):
    print("Crawling the Cardingleaks forum")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                topics = topicPages(html)
                for topic in topics:
                    has_next_topic_page = True
                    counter = 1
                    page = topic

                    while has_next_topic_page:
                        itemURL = urlparse.urljoin(baseURL, str(page))
                        try:
                            driver.get(itemURL)
                        except:
                            driver.refresh()
                        savePage(driver, driver.page_source, topic + f"page{counter}")  # very important

                        # debug cap: stop after two pages per topic; comment out for a full crawl
                        if counter == 2:
                            break

                        try:
                            page = driver.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                            if page == "":
                                raise NoSuchElementException
                            counter += 1
                        except NoSuchElementException:
                            has_next_topic_page = False

                    # return to the listing page
                    for j in range(counter):
                        driver.back()

                    # debug cap: crawl only the first topic; comment out for a full crawl
                    break

                # debug cap: stop after two listing pages; comment out for a full crawl
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Cardingleaks forum done.")


# Returns 'True' if the link is a topic link; may need to change for every website
def isDescriptionLink(url):
    if 'threads' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if 'forums' in url:
        return True
    return False
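
# e.g. (illustrative URLs for this site):
#     isDescriptionLink('https://leaks.ws/threads/some-topic.123/')  -> True
#     isListingLink('https://leaks.ws/forums/carding-methods.82/')   -> True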


# calling the parser to define the links
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return cardingleaks_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing Cardingleaks .... DONE!")
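
# A minimal sketch of invoking this module directly (assumes the Forums
# package is importable and the TOR/Project config entries are in place):
#
#     if __name__ == '__main__':
#         crawler()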