This is based on the calsyslab project.
__author__ = 'Helium'

'''
LionMarketplace Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

from PIL import Image
import urllib.parse as urlparse
import os, re, time
from datetime import date
import subprocess
import configparser
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.LionMarketplace.parser import lionmarketplace_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/'

# Opens Tor Browser, crawls the website, parses the pages, then closes Tor.
# Acts as the main method for the crawler; crawler() at the end of this file calls it.
def startCrawling():
    # opentor()
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            # login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closetor(driver)

    new_parse(mktName, baseURL, True)

# Opens Tor Browser and prompts for ENTER input before continuing
def opentor():
    from MarketPlaces.Initialization.markets_mining import config

    global pid
    print("Connecting Tor...")
    pro = subprocess.Popen(config.get('TOR', 'firefox_binary_path'))
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return

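# Note: opentor() is currently disabled in startCrawling(); createFFDriver()
# below routes traffic through a SOCKS proxy on 127.0.0.1:9150, so this code
# assumes a Tor instance (e.g. a running Tor Browser bundle) is already
# listening on that port.
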
# Returns the name of the website
#return: name of site in string type
def getMKTName():
    name = 'LionMarketplace'
    return name

# Returns the base link of the website
#return: url of base site in string type
def getFixedURL():
    url = 'http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/'
    return url

# Closes Tor Browser
#@param: current selenium driver
def closetor(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return

# Creates the FireFox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    # privacy: keep no history, cookies, or saved passwords between runs
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    # downloads: save pages to disk without prompting
    ff_prof.set_preference("permissions.default.image", 1)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    # proxy: route all traffic through the local Tor SOCKS5 port
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", False)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    driver.maximize_window()

    return driver

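# Note: Selenium 4 deprecates FirefoxBinary and FirefoxProfile in favor of the
# Options API. If this crawler is ever moved to a current Selenium release, an
# equivalent setup would look roughly like the sketch below (untested here):
#
#   from selenium.webdriver.firefox.options import Options
#   opts = Options()
#   opts.binary_location = config.get('TOR', 'firefox_binary_path')
#   opts.set_preference('network.proxy.type', 1)
#   opts.set_preference('network.proxy.socks', '127.0.0.1')
#   opts.set_preference('network.proxy.socks_port', 9150)
#   opts.set_preference('network.proxy.socks_remote_dns', True)
#   driver = webdriver.Firefox(service=service, options=opts)
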
#the driver 'gets' the url, attempting to access the site; returns 'down' if it can't
#return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'

# Waits for a specific element so that the whole page loads. On LionMarketplace
# no credentials or captcha are entered; the call to login() is commented out in
# startCrawling(), and this function only waits for the page to render.
#@param: current selenium web driver
def login(driver):
    # wait for page to show up (this XPath may need to change based on different seed url)
    WebDriverWait(driver, 100).until(EC.visibility_of_element_located(
        (By.XPATH, "/html/body/div[2]/div[2]/div[2]/div[1]/div/div[2]/div")))

# Saves the crawled html page, creating the directory path for html pages if needed
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
#@param: raw url as crawler crawls through every site
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Description', fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE, 'Listing', fileName + '.html')
    return fullPath

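# For example, with a (hypothetical) shared_folder of '/data' and a CURRENT_DATE
# of '2023_01_01', a product page would be written to:
#   /data/MarketPlaces/LionMarketplace/HTML_Pages/2023_01_01/Description/<name>.html
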
# Creates the file name from the passed URL; falls back to a global counter
# when the URL contains no alphanumeric characters
#@param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name

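# Example: for the Software/Malware category URL listed in getInterestedLinks()
# below, only the alphanumeric characters survive, so the saved file name becomes
# 'httplionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcadonioncategory16'.
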
# Returns the list of urls of interest; the crawler runs through this list.
# In this example the links are product categories such as
# Software/Malware, Carding, Hacking, and Tutorials.
def getInterestedLinks():
    links = []

    # Software/Malware
    links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/16')
    # # Carding
    # links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/20')
    # # Hacking
    # links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/ba142ac0-c7e7-11ec-9bd1-fdd89c3d3f91')
    # # Tutorial
    # links.append('http://lionznqc2hg2wsp5vgruqait4cpknihwlje6hkjyi52lcl5ivyf7bcad.onion/category/19')

    return links

# Iterates through the list of links of interest; each link is visited and crawled.
# Both listing and description pages are crawled and saved here.
#@param: selenium driver
def crawlForum(driver):
    print("Crawling the LionMarketplace market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productList = productPages(html)
                for item in productList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # testing hook: uncomment to stop after the first product
                    # break

                # testing hook: remove to crawl past the second listing page
                if count == 1:
                    break

                try:
                    # the 'next page' arrow in the listing's pagination bar
                    link = driver.find_element(by=By.XPATH, value=
                        '/html/body/div[2]/div[2]/div/div[2]/nav/ul/li[5]/a').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the LionMarketplace market done.")

# Returns 'True' if the link is a description link
#@param: url of any url crawled
#return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'product' in url:
        return True
    return False

# Returns True if the link is a listingPage link
#@param: url of any url crawled
#return: true if is a Listing page, false if not
def isListingLink(url):
    if 'category' in url:
        return True
    return False

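# Examples (hypothetical URLs, following the substring rules above):
#   isDescriptionLink('http://example.onion/product/42')  -> True
#   isListingLink('http://example.onion/category/16')     -> True
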
# Calls the parser to extract the links; the html argument is the page source
# of a link from the interested-links list
#@param: html of a page crawled from getInterestedLinks()
#return: list of description links that should be crawled through
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return lionmarketplace_links_parser(soup)

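# lionmarketplace_links_parser (defined in MarketPlaces/LionMarketplace/parser.py)
# is expected to return the product-page hrefs found in the listing soup. A
# minimal sketch, assuming products are plain <a> tags whose href contains
# '/product/' (the real selector lives in the parser module):
#
#   def lionmarketplace_links_parser(soup):
#       return [a['href'] for a in soup.find_all('a', href=True)
#               if '/product/' in a['href']]
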
# Drop links that "signout"
# def isSignOut(url):
#     #absURL = urlparse.urljoin(url.base_url, url.url)
#     if 'signout' in url.lower() or 'logout' in url.lower():
#         return True
#
#     return False

def crawler():
    startCrawling()
    # print("Crawling and Parsing LionMarketplace .... DONE!")