This is based on the calsyslab project.


__author__ = 'DarkWeb'

'''
Abacus Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.Abacus.parser import abacus_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion'


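# Opens Tor Browser, logs in, and crawls the marketplace; parsing of the saved
# pages (new_parse) is currently commented out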
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    # new_parse(mktName, baseURL, True)


# Returns the name of the website
def getMKTName() -> str:
    name = 'Abacus'
    return name


# Returns the base link of the website
def getFixedURL():
    url = 'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion'
    return url


# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates a Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver


# The driver 'gets' the url and attempts to access the site; returns 'down' if it cannot
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


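# Logs in with the hard-coded credentials; the operator must solve the captcha
# and the anti-phishing check manually at the two input() prompts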
def login(driver):
    input("Press ENTER when CAPTCHA is complete and login page has loaded\n")

    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div[1]/div/form/div[3]/input[1]')))

    # entering username and password into input boxes
    try:
        usernameBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[1]/div/form/div[3]/input[1]')
        # Username here
        usernameBox.send_keys('ct1234')
        passwordBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[1]/div/form/div[3]/input[2]')
        # Password here
        passwordBox.send_keys('DementedBed123-')
    except:
        # fallback CSS selectors in case the form layout changed
        usernameBox = driver.find_element(by=By.CSS_SELECTOR, value='input.border-solid:nth-child(2)')
        # Username here
        usernameBox.send_keys('ct1234')
        passwordBox = driver.find_element(by=By.CSS_SELECTOR, value='input.border-solid:nth-child(4)')
        # Password here
        passwordBox.send_keys('DementedBed123-')

    input("Press ENTER AFTER the anti-phishing check is completed (there is a captcha first and then an anti-phishing check)\n")

    # wait for the listing page to show up (this XPath may need to change based on different seed urls)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div[2]/div/div[2]')))


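# Cleans the crawled html page and saves it under the path built by getFullPathName()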
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


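# Builds the save path from the shared folder, the market name, the current date,
# and whether the url is a description (product) page or a listing page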
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE
    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath


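# Derives a file name from the url by keeping only its alphanumeric characters;
# falls back to a global counter when the url contains none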
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name


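# Returns the category search urls to crawl; the commented entries can be
# re-enabled to cover more categories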
def getInterestedLinks():
    links = []

    # botnets and malware
    links.append('http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=475756f633d0cc71f0c868bd&cats=2&s_quick=1')
    # # social engineering
    # links.append(
    #     'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=1c29a89f7a4022133cab877d&cats=2&s_quick=1')
    # digital (note: currently the same category hash as the botnets and malware link above)
    links.append(
        'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=475756f633d0cc71f0c868bd&cats=2&s_quick=1')
    # # hacking
    # links.append(
    #     'http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=a0773b3de70bdaca38acda2f&cats=2&s_quick=1')
    # # carding
    # links.append('http://abacuseeettcn3n2zxo7tqy5vsxhqpha2jtjqs7cgdjzl2jascr4liad.onion/search?fcats[]=1b17857dc74c11953df85c55&cats=2&s_quick=1')

    return links


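# Crawls each listing page, saves it and every product page it links to, then
# follows the 'gg-chevron-right' pagination link until no next page is found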
def crawlForum(driver):
    print("Crawling the Abacus market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    print('waiting ten seconds to avoid ddos check')
                    time.sleep(10)
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        print(itemURL)
                        print('waiting 5 sec to avoid ddos check')
                        time.sleep(5)
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    print('waiting 20 seconds to avoid ddos check')
                    time.sleep(20)
                    driver.back()

                    # comment out
                    break

                # # comment out
                # if count == 3:
                #     break

                # find the next-page chevron; an empty link raises
                # NoSuchElementException, which ends the pagination loop
                try:
                    chev = driver.find_element(by=By.XPATH, value='/html/body/div/div/div[2]/div/div[3]/div[4]')
                    a_tags = chev.find_elements(by=By.TAG_NAME, value='a')
                    try:
                        for a_tag in a_tags:
                            try:
                                temp = a_tag.find_element(by=By.CLASS_NAME, value='gg-chevron-right')
                            except:
                                temp = ''
                            if temp:
                                link = a_tag.get_attribute('href')
                                print(link)
                                if link == '#':
                                    link = ''
                                break
                            else:
                                link = ''
                    except:
                        # fallback: the chevron icon is expected inside the
                        # second-to-last <a> tag, whose href is the next page
                        try:
                            temp = a_tags[-2].find_element(by=By.CLASS_NAME, value='gg-chevron-right')
                            if temp:
                                link = a_tags[-2].get_attribute('href')
                                if link == '#':
                                    link = ''
                            else:
                                link = ''
                        except:
                            link = ''

                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the Abacus market done.")


# Returns True if the link is a description (product) page link, may need to change for every website
def isDescriptionLink(url):
    if 'listing' in url:
        return True
    return False


# Returns True if the link is a listing page link, may need to change for every website
def isListingLink(url):
    if 'search' in url:
        return True
    return False


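# Extracts the product links from a listing page via the Abacus links parser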
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return abacus_links_parser(soup)


def crawler():
    startCrawling()
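

# Minimal sketch of a direct entry point (an assumption: in this project the
# crawler is normally launched through the MarketPlaces initialization
# pipeline, which calls crawler()):
if __name__ == '__main__':
    startCrawling()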