This is based on the calsyslab project.


__author__ = 'Helium'

'''
NexusMarket Marketplace Crawler (Selenium)
not complete
need to go through multiple pages...
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.NexusMarket.parser import nexus_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://nexusabcdkq4pdlubs6wk6ad7pobuupzoomoxi6p7l32ci4vjtb2z7yd.onion/'

# Opens Tor Browser, crawls the website, then parses, then closes Tor
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)

# Returns the name of the website
# return: name of site in string type
def getMKTName():
    name = 'NexusMarket'
    return name


# Return the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://nexusabcdkq4pdlubs6wk6ad7pobuupzoomoxi6p7l32ci4vjtb2z7yd.onion/'
    return url


# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return

# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver
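
# A minimal sketch of the INI entries this module reads through config.get().
# The section and key names come from the calls above and in getFullPathName();
# the path values are placeholders, not values from the original project:
#
#   [TOR]
#   firefox_binary_path = C:\path\to\Tor Browser\Browser\firefox.exe
#   firefox_profile_path = C:\path\to\Tor Browser\Browser\TorBrowser\Data\Browser\profile.default
#   geckodriver_path = C:\path\to\geckodriver.exe
#
#   [Project]
#   shared_folder = C:\path\to\shared_folder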

# The driver 'gets' the url, attempting to reach the site; if it can't, returns 'down'
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'

# Manual CAPTCHA solver: waits for the user to solve the CAPTCHA in the browser,
# then enters the login credentials and waits for the listing page to load
# @param: current selenium web driver
def login(driver):
    input("Press ENTER when CAPTCHA is complete and login page has loaded\n")

    # entering username and password into input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//input[@id="username"]')
    # Username here
    usernameBox.send_keys('findingmykeys')
    passwordBox = driver.find_element(by=By.XPATH, value='//input[@id="inputPassword3"]')
    # Password here
    passwordBox.send_keys('ican’tFindMycarKey$')

    input("Press ENTER when CAPTCHA is completed and you exit the newsletter\n")

    # wait for the listing page to show up (this XPath may need to change based on different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="collapse3"]')))

# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:
        f.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath

# Creates the file name from the passed URL, gives a distinct name if one can't be made after cleaning
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
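
# For illustration only (hypothetical URL): getNameFromURL('http://example.onion/?sub_id=15page1')
# keeps just the alphanumeric characters and returns 'httpexampleonionsubid15page1'.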

# Returns a list of urls of interest; this is where you list the different urls the crawler runs through
# In this example, the links are product categories such as
# malware, ddos, software, botnet, and hacking services
def getInterestedLinks():
    links = []

    # # hire hacker
    # links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=3')
    # # other
    # links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=14')
    # malware
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=15')
    # ddos
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=16')
    # software
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=17')
    # botnet
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=18')
    # hacking service
    links.append('http://wsptlnuoo3johqzcdlwuj5zcwfh2dwmswz6hahqctuxttvxpanypmwad.onion/?sub_id=31')

    return links

# Gets the links of interest to crawl through; iterates through the list, where each link is visited and crawled
# Listing and description pages are crawled here, and both types of pages are saved
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the NexusMarket market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, linksToCrawl[i] + f"page{count+1}")

                productList = productPages(html)
                for item in productList:
                    # build the absolute URL of the product description page from the relative link
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # # comment out
                    # break

                # # comment out
                # if count == 1:
                #     break

                try:
                    link = driver.find_element(by=By.XPATH, value='//a[@rel="next"]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the NexusMarket market done.")

# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'single_product' in url:
        return True
    return False


# Returns True if the link is a listingPage link
# @param: url of any url crawled
# return: true if is a Listing page, false if not
def isListingLink(url):
    if 'sub_id' in url:
        return True
    return False

# Calls the parser to extract the product description links from the html of a listing page
# @param: html of a listing page reached from the interested link list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return nexus_links_parser(soup)
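
# For illustration, a hypothetical minimal version of what a links parser such as
# nexus_links_parser could do (the real implementation lives in
# MarketPlaces.NexusMarket.parser); it assumes description links contain
# 'single_product', consistent with isDescriptionLink() below.
def example_links_parser(soup):
    href = []
    # collect the href of every anchor that points at a product description page
    for a in soup.find_all('a', href=True):
        if 'single_product' in a['href']:
            href.append(a['href'])
    return href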

# Drop links that "signout"
# def isSignOut(url):
#     #absURL = urlparse.urljoin(url.base_url, url.url)
#     if 'signout' in url.lower() or 'logout' in url.lower():
#         return True
#
#     return False

def crawler():
    startCrawling()
    # print("Crawling and Parsing NexusMarket .... DONE!")
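
# The crawler is normally invoked through the MarketPlaces framework via crawler();
# the guard below is an optional convenience (an assumption, not part of the original
# pipeline) that allows running this module directly for testing.
if __name__ == '__main__':
    crawler()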