This is based on the calsyslab project.

__author__ = 'DarkWeb'

'''
ViceCity Market Forum Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.ViceCity.parser import vicecity_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

config = configparser.ConfigParser()
config.read('../../setup.ini')
counter = 1
baseURL = 'http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/'


# Opens Tor Browser, crawls the website, parses the results, then closes Tor.
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it.
def startCrawling():
    opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, baseURL, False)


# Opens Tor Browser and prompts for an ENTER input before continuing
def opentor():
    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return


# Returns the name of the website
#return: name of the site as a string
def getMKTName():
    name = 'ViceCity'
    return name


# Returns the base link of the website
#return: url of the base site as a string
def getFixedURL():
    url = 'http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/'
    return url


# Closes Tor Browser
#@param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return


# Creates the Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # ff_prof.set_preference("places.history.enabled", False)
    # ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    # ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    # ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    # ff_prof.set_preference("signon.rememberSignons", False)
    # ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # ff_prof.set_preference("permissions.default.image", 3)
    # ff_prof.set_preference("browser.download.folderList", 2)
    # ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    # ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)  # 9150 is Tor Browser's SOCKS port; the standalone tor daemon defaults to 9050
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
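
# Note: newer Selenium 4 releases removed the firefox_binary/firefox_profile keyword
# arguments used above. If the call fails after a Selenium upgrade, a minimal sketch of
# the equivalent Options-based setup (untested against this project's config) would be:
#
#   from selenium.webdriver.firefox.options import Options
#   opts = Options()
#   opts.binary_location = config.get('TOR', 'firefox_binary_path')
#   opts.profile = ff_prof
#   driver = webdriver.Firefox(service=service, options=opts)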


# The driver 'gets' the url, attempting to access the site; if it can't, 'down' is returned
#return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'


# Manual captcha solver: waits for a specific element so that the whole page loads,
# then pauses at terminal prompts while each captcha is solved manually in the browser
#@param: current selenium web driver
def login(driver):
    # wait for the first captcha page to show up (this XPath may need to change based on the seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div/div/form/div/div[1]")))
    input("Press Enter once the captcha is done (don't press the Done button)")

    # clicks the button after the captcha is entered
    driver.find_element(by=By.XPATH, value='/html/body/div/div/form/button').click()

    # wait for the login page to show up
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div/form')))

    # puts the username into its box
    userBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    userBox.send_keys('ct1234')

    # waits for the second captcha to be entered by the user
    input("Press Enter once the captcha is done (don't press the Continue button)")

    # clicks on Continue
    driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/input[2]').click()

    # waits for the password field to show
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div/div/div/form/div[3]/input')))
    time.sleep(10)  # give the site time to catch up

    # puts the password into its box
    passBox = driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/div[2]/input')
    passBox.send_keys('DementedBed123-')
    driver.find_element(by=By.XPATH, value='/html/body/div/div/div/form/div[3]/input').click()

    # wait for the pin input to show
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div/form/span')))
    pinBox = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/form/input[1]')
    pinBox.send_keys('12345')
    driver.find_element(by=By.XPATH, value='/html/body/div[1]/div/form/input[2]').click()

    # waits for the main listing page before crawling to ensure everything went well
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '/html/body/div[1]/div/div[2]')))


# Saves the crawled html page, creating the directory path for html pages if it does not exist
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return


# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as the crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
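
# For example, a listing page ends up at the path below (separators per the host OS;
# <shared_folder> and CURRENT_DATE come from the project configuration):
#   <shared_folder>/MarketPlaces/ViceCity/HTML_Pages/<CURRENT_DATE>/Listing/<fileName>.html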


# Creates the file name from the passed URL; falls back to a global counter when
# nothing alphanumeric is left after cleaning
#@param: raw url as the crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
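
# For example, getNameFromURL('http://abc.onion/?category=94') keeps only the
# alphanumeric characters and returns 'httpabconioncategory94' ('abc.onion' is an
# illustrative host); the counter is used only when nothing survives the filter.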


# Returns the list of urls of interest; the crawler runs through this list.
# In this example the links are product categories, such as
# Guides and Tutorials, Digital Products, and Software and Malware.
def getInterestedLinks():
    links = []

    # Digital - Fraud Software, has Hacking and Guides
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=150')
    # Digital - Guides and Tutorials
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=94')
    # Carding Services
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=155')
    # Digital - Other (half junk, half random stuff like bots, rats, viruses, and guides)
    links.append('http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?category=153')

    return links


# Iterates through the list of links of interest, visiting each one and paging through it.
# Both listing and description pages are crawled and saved here.
#@param: selenium driver
def crawlForum(driver):
    print("Crawling the ViceCity Market")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    count = 0
    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(driver, html, link)

            has_next_page = True
            while has_next_page:
                productLinks = productPages(html)
                for item in productLinks:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    time.sleep(2.5)  # to let the page catch up
                    savePage(driver, driver.page_source, item)
                    time.sleep(2.5)  # so the site doesn't crash
                    driver.back()

                    # comment out
                    # break

                # comment out
                # if count == 1:
                #     count = 0
                #     break

                try:
                    temp = driver.find_element(by=By.CLASS_NAME, value='pagination')
                    link = temp.find_element(by=By.LINK_TEXT, value='Next').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(driver, html, link)
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling ViceCity done successfully. Press ENTER to continue\n")


# Returns 'True' if the link is a description link
#@param: url of any url crawled
#return: true if it is a description page, false if not
def isDescriptionLink(url):
    if 'listing' in url:
        return True
    return False


# Returns True if the link is a listing page link
#@param: url of any url crawled
#return: true if it is a listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False
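
# Naming note: a *description* (single product) url is recognized by the substring
# 'listing', while a *listing* (category overview) url is recognized by 'category',
# matching the '?category=...' urls in getInterestedLinks(). A url such as
# '.../?listing=1234' (an illustrative value inferred from the substring check,
# not taken from the site) is therefore treated as a description page.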


# Calls the parser to extract the description links from a crawled listing page
#@param: html source of a page reached from the interested-links list, i.e. getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return vicecity_links_parser(soup)
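
# The parser may return relative hrefs; crawlForum() resolves them against baseURL:
#   urlparse.urljoin(baseURL, '?listing=1234')
#   # -> 'http://52qlucglu6fuaqist2herssakipapig2higaaayu7446n55xw4ylxqid.onion/?listing=1234'
# ('?listing=1234' is an illustrative value, not a link taken from the site)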


def crawler():
    startCrawling()
    # print("Crawling and Parsing ViceCity .... DONE!")
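
# Typical usage, assuming this file lives at MarketPlaces/ViceCity/crawler_selenium.py
# as the sibling imports suggest (the module path is an assumption):
#   from MarketPlaces.ViceCity.crawler_selenium import crawler
#   crawler()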