# this is based on the calsyslab project
__author__ = 'Helium'

'''
DarkMatter Marketplace Crawler (Selenium)
The crawler works, but it is slow: delays are needed to get past the site's click-speed check.
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.DarkMatter.parser import darkmatter_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://darkmat3kdxestusl437urshpsravq7oqb7t3m36u2l62vnmmldzdmid.onion/'
# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; the crawler() function at the end of this file calls it.
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)
# Returns the name of the website
# return: name of site as a string
def getMKTName():
    name = 'DarkMatter'
    return name


# Returns the base link of the website
# return: url of the base site as a string
def getFixedURL():
    url = 'http://darkmat3kdxestusl437urshpsravq7oqb7t3m36u2l62vnmmldzdmid.onion/'
    return url
# Closes Tor Browser
# @param: current selenium driver
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    # ff_prof.set_preference("network.dns.disablePrefetch", True)  # connection issue
    # ff_prof.set_preference("network.http.sendRefererHeader", 0)  # connection issue
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver
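
# The firefox_binary/firefox_profile keyword arguments used above were removed
# in newer Selenium releases (4.10+). Below is a minimal sketch of the
# equivalent setup through Options; it assumes the same config keys and is an
# untested alternative, not part of the original crawler.
def createFFDriverV4():
    from selenium.webdriver.firefox.options import Options
    from MarketPlaces.Initialization.markets_mining import config

    options = Options()
    options.binary_location = config.get('TOR', 'firefox_binary_path')  # Tor Browser's firefox binary
    options.profile = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    options.set_preference('network.proxy.type', 1)             # route traffic through the
    options.set_preference('network.proxy.socks', '127.0.0.1')  # local Tor SOCKS proxy
    options.set_preference('network.proxy.socks_port', 9150)
    options.set_preference('network.proxy.socks_remote_dns', True)

    service = Service(config.get('TOR', 'geckodriver_path'))
    driver = webdriver.Firefox(service=service, options=options)
    driver.maximize_window()
    return driver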
# The driver 'gets' the url and attempts to access the site; returns 'down' if it cannot.
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Manual captcha solver: waits for a specific element so that the whole page loads, finds the input box,
# takes a screenshot of the captcha, then allows for manual solving of the captcha in the terminal.
# @param: current selenium web driver
def login(driver):
    input("Press ENTER when CAPTCHA is completed and page is loaded\n")
    # wait for the page to show up (this XPath may need to change based on the seed url)
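    # A minimal sketch of the explicit wait the comment above refers to
    # (WebDriverWait/EC/By are already imported); the XPath is a placeholder
    # and would need to match a real element on the post-captcha page:
    # WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
    #     (By.XPATH, '/html/body/div[1]')))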
# Saves the crawled html page, creating the directory path for html pages if it doesn't exist
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as f:  # 'with' ensures the file handle is closed
        f.write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as the crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    # build the path with os.path.join so the separators stay portable
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath
# Creates the file name from the passed URL; falls back to a distinct counter value
# when the cleaned URL yields no usable name
# @param: raw url as the crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
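
# For illustration with a hypothetical input: a url such as
# 'http://example.onion/market/products/?category=121' cleans to
# 'httpexampleonionmarketproductscategory121'; a url with no alphanumeric
# characters falls back to the global counter ('1', '2', ...).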
# Returns the list of urls of interest; the crawler runs through this list.
# In this example the urls are product categories, such as
# Guides and Tutorials, Digital Products, and Software and Malware.
def getInterestedLinks():
    links = []

    # # digital fraud software
    # links.append('http://darkmat3kdxestusl437urshpsravq7oqb7t3m36u2l62vnmmldzdmid.onion/market/products/?category=76')
    # # legit
    # links.append('http://darkmat3kdxestusl437urshpsravq7oqb7t3m36u2l62vnmmldzdmid.onion/market/products/?category=78')
    # # hack guides
    # links.append('http://darkmat3kdxestusl437urshpsravq7oqb7t3m36u2l62vnmmldzdmid.onion/market/products/?category=94')
    # # services
    # links.append('http://darkmat3kdxestusl437urshpsravq7oqb7t3m36u2l62vnmmldzdmid.onion/market/products/?category=117')
    # software/malware
    links.append('http://darkmat3kdxestusl437urshpsravq7oqb7t3m36u2l62vnmmldzdmid.onion/market/products/?category=121')

    return links
# Iterates through the list of links of interest; each link is opened and crawled.
# Both listing and description pages are crawled and saved here.
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the DarkMatter market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                products = productPages(html)  # renamed from 'list' to avoid shadowing the builtin
                for item in products:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        time.sleep(3)  # to keep the site from detecting click speed
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    time.sleep(3)  # to keep the site from detecting click speed
                    driver.back()

                    # testing toggle: uncomment to stop after the first product page
                    # break

                # testing toggle: stops after two listing pages; comment out for a full crawl
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.LINK_TEXT, value=">").get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the DarkMatter market done.")
# Returns True if the link is a description page link
# @param: url of any page crawled
# return: True if it is a description page, False if not
def isDescriptionLink(url):
    if 'products/' in url and '/products/?category' not in url:
        return True
    return False


# Returns True if the link is a listing page link
# @param: url of any page crawled
# return: True if it is a listing page, False if not
def isListingLink(url):
    if '?category' in url:
        return True
    return False
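
# For illustration, against the urls this crawler actually visits:
#   '.../market/products/?category=121' -> isListingLink True, isDescriptionLink False
#   '.../market/products/<item-slug>'   -> isListingLink False, isDescriptionLink True
# (the second url is a hypothetical shape, shown only for the pattern)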
# Calls the links parser on a page's html to extract the description links.
# @param: html page source of a link from the interested-links list, i.e. getInterestedLinks()
# return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return darkmatter_links_parser(soup)


# Drop links that "signout"
# def isSignOut(url):
#     # absURL = urlparse.urljoin(url.base_url, url.url)
#     if 'signout' in url.lower() or 'logout' in url.lower():
#         return True
#
#     return False
def crawler():
    startCrawling()
    # print("Crawling and Parsing DarkMatter .... DONE!")
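
# A minimal usage sketch (an assumption; in this codebase the module is
# normally invoked through the MarketPlaces initialization pipeline rather
# than run directly):
#
# if __name__ == '__main__':
#     crawler()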