This is based on the calsyslab project.


__author__ = 'DarkWeb'

'''
PabloEscobarMarket Marketplace Crawler (Selenium)
'''

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from PIL import Image
import urllib.parse as urlparse
import os, re, time
import subprocess
from bs4 import BeautifulSoup
from MarketPlaces.Initialization.prepare_parser import new_parse
from MarketPlaces.PabloEscobarMarket.parser import pabloescobarmarket_links_parser
from MarketPlaces.Utilities.utilities import cleanHTML

counter = 1
baseURL = 'http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/'
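# NOTE: 'counter' is a fallback identifier used by getNameFromURL() when a URL
# contains no alphanumeric characters; 'baseURL' is the market's onion address,
# used both as the crawl seed and to resolve relative product links.
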
# Opens Tor Browser, crawls the website
def startCrawling():
    mktName = getMKTName()
    driver = getAccess()

    if driver != 'down':
        try:
            login(driver)
            crawlForum(driver)
        except Exception as e:
            print(driver.current_url, e)
        closeDriver(driver)

    new_parse(mktName, baseURL, True)

# Login using premade account credentials; the login CAPTCHA is solved manually
def login(driver):
    input("Press ENTER when CAPTCHA is complete and login page has loaded\n")

    # enter username and password into the input boxes
    usernameBox = driver.find_element(by=By.XPATH, value='//*[@id="username"]')
    # Username here
    usernameBox.send_keys('snorlaxrights')
    passwordBox = driver.find_element(by=By.XPATH, value='//*[@id="inputPassword3"]')
    # Password here
    passwordBox.send_keys('$noringAllday')

    input("Press ENTER when CAPTCHA is completed\n")

    # wait for the listing page to show up (this XPath may need to change for a different seed URL)
    # wait up to 50 seconds until the element with id="collapse3" is visible, then continue
    WebDriverWait(driver, 50).until(EC.visibility_of_element_located(
        (By.XPATH, '//*[@id="collapse3"]')))

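# NOTE: login() blocks on input() so the operator can solve the CAPTCHA by hand;
# the Firefox driver therefore has to run with a visible window (non-headless).
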
# Returns the name of the website
def getMKTName() -> str:
    name = 'PabloEscobarMarket'
    return name


# Returns the link of the website
def getFixedURL():
    url = 'http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/'
    return url

# Closes Tor Browser
def closeDriver(driver):
    # global pid
    # os.system("taskkill /pid " + str(pro.pid))
    # os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()  # close tab
    time.sleep(3)
    return

# Creates a Firefox 'driver' and configures its 'profile'
# to use the Tor proxy and socket
def createFFDriver():
    from MarketPlaces.Initialization.markets_mining import config

    ff_binary = FirefoxBinary(config.get('TOR', 'firefox_binary_path'))

    ff_prof = FirefoxProfile(config.get('TOR', 'firefox_profile_path'))
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 3)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(config.get('TOR', 'geckodriver_path'))

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)
    driver.maximize_window()

    return driver

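# NOTE: 127.0.0.1:9150 is the SOCKS5 port exposed by a running Tor Browser
# (a standalone tor daemon defaults to 9050 instead), and setting
# socks_remote_dns=True routes DNS lookups through Tor so that .onion names
# resolve and lookups do not leak outside the proxy.
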
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'

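# NOTE: the string 'down' returned above is the sentinel checked in
# startCrawling(); any failure to load the seed URL is treated as the site
# being unreachable.
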
# Saves the crawled html page
def savePage(driver, page, url):
    cleanPage = cleanHTML(driver, page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    with open(filePath, 'wb') as file:
        file.write(cleanPage.encode('utf-8'))
    return

# Gets the full path of the page to be saved along with its appropriate file name
def getFullPathName(url):
    from MarketPlaces.Initialization.markets_mining import config, CURRENT_DATE

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "MarketPlaces/" + getMKTName() + "/HTML_Pages")
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Description\\' + fileName + '.html')
    else:
        fullPath = os.path.join(mainDir, CURRENT_DATE + r'\\Listing\\' + fileName + '.html')
    return fullPath

# Creates the file name from the passed URL
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name

# FIX
def getInterestedLinks():
    links = []

    # # hire hacker
    # links.append('http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/?sub_id=36')
    # hacker
    links.append('http://niejmptjzwhlfywruoab4pbuxg7kp2mtcr4c6mgpeykju5matewg36yd.onion/?sub_id=34')

    return links

def crawlForum(driver):
    print("Crawling the PabloEscobarMarket market")

    linksToCrawl = getInterestedLinks()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            has_next_page = True
            count = 0

            while has_next_page:
                try:
                    driver.get(link)
                except:
                    driver.refresh()
                html = driver.page_source
                savePage(driver, html, link)

                productLinks = productPages(html)
                for item in productLinks:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver, driver.page_source, item)
                    driver.back()

                    # comment out the break to visit every product on the page
                    break

                # comment out the check to crawl more than one listing page
                if count == 1:
                    break

                try:
                    link = driver.find_element(by=By.XPATH, value='//a[@rel="next"]').get_attribute('href')
                    if link == "":
                        raise NoSuchElementException
                    count += 1
                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    print("Crawling the PabloEscobarMarket market done.")

# Returns 'True' if the link is a product description link; may need to change for every website
def isDescriptionLink(url):
    if 'single_product' in url:
        return True
    return False


# Returns 'True' if the link is a listing page link; may need to change for every website
def isListingLink(url):
    if 'sub_id' in url:
        return True
    return False

# calls the parser to extract the product links from a listing page
def productPages(html):
    soup = BeautifulSoup(html, "html.parser")
    return pabloescobarmarket_links_parser(soup)


def crawler():
    startCrawling()
    # print("Crawling and Parsing PabloEscobarMarket .... DONE!")
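

# A minimal sketch for running this crawler directly, assuming the repository
# root is on PYTHONPATH and the 'TOR' and 'Project' sections of the config read
# by markets_mining are filled in; in the original project this module is
# presumably invoked through the MarketPlaces initialization pipeline instead.
if __name__ == '__main__':
    crawler()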