__author__ = 'DarkWeb'

'''
BestCardingWorld Forum Crawler (Selenium)
'''
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.firefox.service import Service
from selenium.webdriver.common.by import By

import urllib.parse as urlparse
import os, time
from datetime import date
import subprocess
from bs4 import BeautifulSoup

from Forums.Initialization.prepare_parser import new_parse
from Forums.BestCardingWorld.parser import bestcardingworld_links_parser
from Forums.Utilities.utilities import cleanHTML
counter = 1
baseURL = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/'
# Opens Tor Browser, crawls the website, then parses, then closes tor
# acts like the main method for the crawler, another function at the end of this code calls this function later
def startCrawling():
    # opentor()
    forumName = getForumName()

    # driver = getAccess()
    # if driver != 'down':
    #     try:
    #         crawlForum(driver)
    #     except Exception as e:
    #         print(driver.current_url, e)
    #     closetor(driver)

    new_parse(forumName, False)
# Opens Tor Browser
# prompts for ENTER input to continue
def opentor():
    global pid
    print("Connecting Tor...")
    path = open('../../path.txt').readline().strip()
    pro = subprocess.Popen(path)
    pid = pro.pid
    time.sleep(7.5)
    input('Tor Connected. Press ENTER to continue\n')
    return
# Returns the name of the website
# return: name of site in string type
def getForumName():
    name = 'BestCardingWorld'
    return name
# Returns the base link of the website
# return: url of base site in string type
def getFixedURL():
    url = 'http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/'
    return url
# Closes Tor Browser
# @param: current selenium driver
def closetor(driver):
    global pid
    # os.system("taskkill /pid " + str(pro.pid))
    os.system("taskkill /t /f /im tor.exe")
    print('Closing Tor...')
    driver.close()
    time.sleep(3)
    return
# Creates the Firefox 'driver' and configures its 'Profile'
# to use the Tor proxy and socket
def createFFDriver():
    file = open('../../path.txt', 'r')
    lines = file.readlines()

    ff_binary = FirefoxBinary(lines[0].strip())

    ff_prof = FirefoxProfile(lines[1].strip())
    ff_prof.set_preference("places.history.enabled", False)
    ff_prof.set_preference("privacy.clearOnShutdown.offlineApps", True)
    ff_prof.set_preference("privacy.clearOnShutdown.passwords", True)
    ff_prof.set_preference("privacy.clearOnShutdown.siteSettings", True)
    ff_prof.set_preference("privacy.sanitize.sanitizeOnShutdown", True)
    ff_prof.set_preference("signon.rememberSignons", False)
    ff_prof.set_preference("network.cookie.lifetimePolicy", 2)
    ff_prof.set_preference("network.dns.disablePrefetch", True)
    ff_prof.set_preference("network.http.sendRefererHeader", 0)
    ff_prof.set_preference("permissions.default.image", 2)
    ff_prof.set_preference("browser.download.folderList", 2)
    ff_prof.set_preference("browser.download.manager.showWhenStarting", False)
    ff_prof.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain")
    ff_prof.set_preference('network.proxy.type', 1)
    ff_prof.set_preference("network.proxy.socks_version", 5)
    ff_prof.set_preference('network.proxy.socks', '127.0.0.1')
    ff_prof.set_preference('network.proxy.socks_port', 9150)  # Tor Browser's SOCKS port (a standalone tor daemon usually listens on 9050)
    ff_prof.set_preference('network.proxy.socks_remote_dns', True)
    ff_prof.set_preference("javascript.enabled", True)
    ff_prof.update_preferences()

    service = Service(lines[2].strip())

    driver = webdriver.Firefox(firefox_binary=ff_binary, firefox_profile=ff_prof, service=service)

    return driver
# the driver 'gets' the url, attempting to access the site; if it can't, return 'down'
# return: the selenium driver, or the string 'down'
def getAccess():
    url = getFixedURL()
    driver = createFFDriver()
    try:
        driver.get(url)
        return driver
    except:
        driver.close()
        return 'down'
# Saves the crawled html page, makes the directory path for html pages if not made
def savePage(page, url):
    cleanPage = cleanHTML(page)
    filePath = getFullPathName(url)
    os.makedirs(os.path.dirname(filePath), exist_ok=True)
    open(filePath, 'wb').write(cleanPage.encode('utf-8'))
    return
# Gets the full path of the page to be saved along with its appropriate file name
# @param: raw url as crawler crawls through every site
def getFullPathName(url):
    fileName = getNameFromURL(url)
    if isDescriptionLink(url):
        fullPath = r'C:\Users\fakeguy\Documents\threatIntelligence-main\DarkWebMining_Working\Forums\BestCardingWorld\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Description\\' + fileName + '.html'
    else:
        fullPath = r'C:\Users\fakeguy\Documents\threatIntelligence-main\DarkWebMining_Working\Forums\BestCardingWorld\HTML_Pages\\' + str(
            "%02d" % date.today().month) + str("%02d" % date.today().day) + str(
            "%04d" % date.today().year) + r'\\' + r'Listing\\' + fileName + '.html'
    return fullPath
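
# Example (illustrative, not from the original source): a listing page crawled on
# 2023-06-05 would be written to ...\HTML_Pages\06052023\Listing\<fileName>.html,
# while topic (description) pages land in the parallel Description\ folder.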
# Creates the file name from passed URL, gives distinct name if can't be made unique after cleaned
# @param: raw url as crawler crawls through every site
def getNameFromURL(url):
    global counter
    name = ''.join(e for e in url if e.isalnum())
    if name == '':
        name = str(counter)
        counter = counter + 1
    return name
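
# Example (illustrative): getNameFromURL('viewtopic.php?t=123') returns 'viewtopicphpt123';
# a URL that cleans down to an empty string instead receives the current value of the
# global counter, so pages whose URLs contain no alphanumerics still get usable file names.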
# returns list of urls, here is where you can list the different urls of interest, the crawler runs through this list
# in this example, there are a couple of categories some threads fall under such as
# exploits, malware, and hacking tutorials
def getInterestedLinks():
    links = []

    # Penetration Tests
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=43')
    # Social Engineering Tests
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=44')
    # Exploits
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=45')
    # Tools
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=46')
    # Malware
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=47')
    # Cryptography
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=48')
    # Others
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=49')
    # Hacking Tutorials
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=50')
    # Hacked Accounts and Database Dumps
    # links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=30')
    # Android Moded pak
    links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=53')
    # General Discussion
    # links.append('http://bestteermb42clir6ux7xm76d4jjodh3fpahjqgbddbmfrgp4skg2wqd.onion/viewforum.php?f=16&sid=6a4959d49be41e72944e5aa5684c187a')

    return links
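
# Note (editor assumption): the f= query parameter selects a phpBB board id, so pointing
# the crawler at another sub-forum only requires appending its viewforum.php?f=<id> URL
# to the list above.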
# gets links of interest to crawl through, iterates through the list, where each link is clicked and crawled through
# topic and description pages are crawled through here, where both types of pages are saved
# @param: selenium driver
def crawlForum(driver):
    print("Crawling the BestCardingWorld forum")

    linksToCrawl = getInterestedLinks()
    visited = set(linksToCrawl)
    initialTime = time.time()

    i = 0
    while i < len(linksToCrawl):
        link = linksToCrawl[i]
        print('Crawling :', link)
        try:
            try:
                driver.get(link)
            except:
                driver.refresh()
            html = driver.page_source
            savePage(html, link)

            has_next_page = True
            while has_next_page:
                topicList = topicPages(html)
                for item in topicList:
                    itemURL = urlparse.urljoin(baseURL, str(item))
                    try:
                        driver.get(itemURL)
                    except:
                        driver.refresh()
                    savePage(driver.page_source, item)
                    driver.back()

                try:
                    # locate the pagination bar and follow its 'next' link (Selenium 4 locator API)
                    bar = driver.find_element(by=By.XPATH, value='/html/body/div[1]/div[2]/div[2]/div[3]/ul')
                    nextNav = bar.find_element(by=By.CLASS_NAME, value='next')
                    link = nextNav.find_element(by=By.TAG_NAME, value='a').get_attribute('href')

                    try:
                        driver.get(link)
                    except:
                        driver.refresh()
                    html = driver.page_source
                    savePage(html, link)

                except NoSuchElementException:
                    has_next_page = False

        except Exception as e:
            print(link, e)
        i += 1

    # finalTime = time.time()
    # print(finalTime - initialTime)

    input("Crawling BestCardingWorld forum done successfully. Press ENTER to continue\n")
# Returns 'True' if the link is a description link
# @param: url of any url crawled
# return: true if is a description page, false if not
def isDescriptionLink(url):
    if 'topic' in url:
        return True
    return False
# Returns 'True' if the link is a listingPage link
# @param: url of any url crawled
# return: true if is a Listing page, false if not
def isListingLink(url):
    if 'forum' in url:
        return True
    return False
# calls the parser to extract topic links; the html is the page source of a link from the interested-links list
# @param: html page source of a link from the interested-links list
# return: list of description links that should be crawled through
def topicPages(html):
    soup = BeautifulSoup(html, "html.parser")
    #print(soup.find('div', {"class": "forumbg"}).find('ul', {"class": "topiclist topics"}).find('li', {"class": "row bg1"}).find('a', {"class": "topictitle"}, href=True))
    return bestcardingworld_links_parser(soup)
def crawler():
    startCrawling()
    # print("Crawling and Parsing BestCardingWorld .... DONE!")
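
# Minimal sketch of a standalone entry point (assumption: in the full project, crawler()
# is normally invoked by the Forums initialization layer rather than run directly).
if __name__ == '__main__':
    crawler()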