# This is based on the calsyslab project.
__author__ = 'DarkWeb'

import codecs
import glob
import os, re
import shutil
import time
import traceback

from bs4 import BeautifulSoup  # used by read_file; imported explicitly instead of relying on the wildcard imports below
from psycopg2.extras import RealDictCursor

from Forums.DB_Connection.db_connection import *
from Forums.BestCardingWorld.parser import *
from Forums.Classifier.classify_product import predict
# from DarkWebMining_Sample.Forums.Classifier.classify_product import predict_semi

# controls the log id
nError = 0
# determines if forum is russian, not really used now but maybe later
def isRussianForum(forum):

    with open('russian_forums.txt') as f:
        forums = f.readlines()

    result = False
    for iforum in forums:
        iforum = iforum.replace('\n', '')
        if iforum == forum:
            result = True
            break

    return result
# tries to match description pages to listing pages by using a key made for every description page and every link in a listing page
# once verified and matched, the info is merged into a 'rec', which is returned
# @param: rmm is the row of data parsed from a description page, rec is the row of data of an instance from a listing page
# @return: rec, row of data, that may have additional data added on after matching description to listing page
def mergePages(rmm, rec):

    # key = u"Top:" + rec[1].upper().strip() + u" User:" + rec[5].upper().strip()
    # key = rec[16]

    print("----------------- Matched: " + rec[3] + "--------------------")

    if rmm[9] != "-1":  # image_user
        rec[9] = rmm[9]

    rec[10] = rmm[1]
    rec[11] = rmm[2]
    rec[12] = rmm[3]
    rec[13] = rmm[4]
    rec[14] = rmm[5]
    rec[15] = rmm[6]
    rec[16] = rmm[7]
    rec[17] = rmm[8]
    rec[18] = rmm[10]

    return rec
# takes a list of posts and joins them together into one string to be put in the database as a single string of text
# @param: posts, a list of strings (the posts of a thread)
# @return: string containing the concatenation of all the posts
def getPosts(posts):

    strPosts = ' '.join(posts)
    return strPosts.strip()
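# (e.g. getPosts(['first post', 'second post']) returns 'first post second post')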
# uses the db connection module's methods to persist values to the correct tables
# @param: url is the forum's seed url, row is the list of entries for this instance, cur is the db cursor
def persist_data(url, row, cur):

    forum = create_forum(cur, row, url)

    author = create_author(cur, row, forum)

    topic = create_topic(cur, forum, row, author)

    create_posts(cur, row, forum, topic)
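# note: as the call chain above shows, each create_* helper returns the value
# (forum, author, topic) that the next helper takes as input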
def incrementError():
    global nError
    nError += 1
def read_file(filePath, createLog, logFile):
    try:
        html = codecs.open(filePath.strip('\n'), encoding='utf8')
        soup = BeautifulSoup(html, "html.parser")
        html.close()
        time.sleep(0.01)  # making sure the file is closed before returning the soup object
        return soup
    except:
        try:
            html = open(filePath.strip('\n'))
            soup = BeautifulSoup(html, "html.parser")
            html.close()
            time.sleep(0.01)  # making sure the file is closed before returning the soup object
            return soup
        except:
            incrementError()
            print("There was a problem reading the file " + filePath)
            if createLog:
                logFile.write(
                    str(nError) + ". There was a problem reading the file " + filePath + "\n"
                    + traceback.format_exc() + "\n")
            return None
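# note: read_file first tries a utf-8 read via codecs.open and falls back to the
# platform default encoding; a None return tells the caller to skip that page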
def parse_listing(forum, listingFile, soup, createLog, logFile):
    try:
        if forum == "BestCardingWorld":
            rw = bestcardingworld_listing_parser(soup)
        else:
            print("MISSING CALL TO LISTING PARSER IN PREPARE_PARSER.PY!")
            raise Exception
        return rw
    except:
        incrementError()
        print("There was a problem parsing the file " + listingFile + " in the Listing section!")
        traceback.print_exc()
        if createLog:
            logFile.write(
                str(nError) + ". There was a problem parsing the file " + listingFile + " in the Listing section.\n"
                + traceback.format_exc() + "\n")
        return None
def parse_description(forum, descriptionFile, soup, createLog, logFile):
    try:
        if forum == "BestCardingWorld":
            rmm = bestcardingworld_description_parser(soup)
        else:
            print("MISSING CALL TO DESCRIPTION PARSER IN PREPARE_PARSER.PY!")
            raise Exception
        return rmm
    except:
        incrementError()
        print("There was a problem parsing the file " + descriptionFile + " in the Description section!")
        traceback.print_exc()
        if createLog:
            logFile.write(
                str(nError) + ". There was a problem parsing the file " + descriptionFile + " in the Description section.\n"
                + traceback.format_exc() + "\n")
        return None
def persist_record(url, rec, cur, con, createLog, logFile, listingFile, descriptionFile):
    try:
        persist_data(url, tuple(rec), cur)
        con.commit()
        return True
    except:
        con.rollback()
        incrementError()
        print(f"There was a problem persisting the files ({listingFile} + {descriptionFile}) in the database!")
        traceback.print_exc()
        if createLog:
            logFile.write(
                str(nError) + f". There was a problem persisting the files ({listingFile} + {descriptionFile}) in the database!\n"
                + traceback.format_exc() + "\n")
        return False
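# note: each record is committed individually, so a page that fails to persist
# only rolls back its own transaction instead of the whole parsing run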
def move_file(filePath, createLog, logFile):
    source = filePath
    destination = filePath.replace(os.path.basename(filePath), "") + 'Read\\' + os.path.basename(filePath)
    try:
        shutil.move(source, destination, shutil.copy2)
        return True
    except:
        try:
            shutil.move(source, destination, shutil.copytree)
            return True
        except:
            incrementError()
            print("There was a problem moving the file " + filePath)
            traceback.print_exc()
            if createLog:
                logFile.write(
                    str(nError) + ". There was a problem moving the file " + filePath + "\n"
                    + traceback.format_exc() + "\n")
            return False
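# note: the third positional argument of shutil.move is copy_function; copy2 is
# tried first (regular files) and copytree is the fallback in case the path
# being moved is a directory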
# main method for this program; gets the parsed info from the parsers and persists it into the db
# calls the different parser methods here depending on the type of html page
def new_parse(forum, url, createLog):

    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    global nError
    nError = 0

    print("Parsing the " + forum + " forum and conducting data classification to store the information in the database.")

    # Connecting to the database
    con = connectDataBase()
    cur = con.cursor(cursor_factory=RealDictCursor)

    # Creating the tables (the database itself should be created manually)
    create_database(cur, con)

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums\\" + forum + "\\HTML_Pages")

    # Creating the log file for each forum
    if createLog:
        try:
            logFile = open(mainDir + f"/{CURRENT_DATE}/" + forum + "_" + CURRENT_DATE + ".log", "w")
        except:
            print("Could not open log file!")
            createLog = False
            logFile = None
            # raise SystemExit
    else:
        logFile = None

    # Reading the Listing Html Pages
    listings = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Listing", '*.html'))
    for listingIndex, listingFile in enumerate(listings):

        print("Reading listing folder of '" + forum + "', file '" + os.path.basename(listingFile) + "', index= " + str(
            listingIndex + 1) + " ... " + str(len(listings)))

        listingSoup = read_file(listingFile, createLog, logFile)

        # listing flags
        doParseListing = listingSoup is not None
        doDescription = False

        readDescriptionError = False
        parseDescriptionError = False
        persistDescriptionError = False
        moveDescriptionError = False
        findDescriptionError = False

        rw = []

        if doParseListing:
            rw = parse_listing(forum, listingFile, listingSoup, createLog, logFile)
            doDescription = rw is not None

        if doDescription:
            nFound = 0

            for rec in rw:

                rec = rec.split(',')

                descriptionPattern = cleanLink(rec[6]) + "page[0-9]*.html"

                # Reading the associated description Html Pages
                descriptions = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Description", descriptionPattern))

                nFound += len(descriptions)

                for descriptionIndex, descriptionFile in enumerate(descriptions):

                    print("Reading description folder of '" + forum + "', file '" + os.path.basename(
                        descriptionFile) + "', index= " + str(descriptionIndex + 1) + " ... " + str(len(descriptions)))

                    descriptionSoup = read_file(descriptionFile, createLog, logFile)

                    # description flags
                    doParseDescription = descriptionSoup is not None
                    doPersistRecord = False
                    doMoveDescription = False

                    rmm = []

                    if doParseDescription:
                        rmm = parse_description(forum, descriptionFile, descriptionSoup, createLog, logFile)
                        doPersistRecord = rmm is not None
                    else:
                        readDescriptionError = True
                        parseDescriptionError = True

                    if doPersistRecord:

                        # Combining the information from the Listing and Description pages
                        rec = mergePages(rmm, rec)

                        # Appending to the record the classification of the topic
                        rec.append(str(predict(rec[3], getPosts(rec[15]), language='sup_english')))

                        # Persisting the information in the database
                        persistSuccess = persist_record(url, rec, cur, con, createLog, logFile, listingFile,
                                                        descriptionFile)

                        doMoveDescription = persistSuccess
                    else:
                        parseDescriptionError = True

                    if doMoveDescription:
                        # move description files to the completed folder
                        moveSuccess = move_file(descriptionFile, createLog, logFile)
                        if not moveSuccess:
                            moveDescriptionError = True
                    else:
                        moveDescriptionError = True
            if not (nFound > 0):
                findDescriptionError = True

                incrementError()
                print(f"There was a problem locating the file(s) for {listingFile} in the Description section!")
                if createLog:
                    logFile.write(
                        str(nError) + f". There was a problem locating the file(s) for {listingFile}"
                                      f" in the Description section!\n\n")

        if not (readDescriptionError or parseDescriptionError or persistDescriptionError
                or moveDescriptionError or findDescriptionError):

            # move listing files to the completed folder
            move_file(listingFile, createLog, logFile)
    # registering the current forum status (up/down) and the number of scraped pages in the database
    forumId = verifyForum(cur, forum)
    if forumId > 0:

        readListings = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Listing\\read", '*.html'))
        readDescriptions = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Description\\read", '*.html'))

        create_status(cur, forumId, CURRENT_DATE, len(readListings), len(readDescriptions),
                      '1' if len(listings) > 0 else '0')

        con.commit()

    if createLog:
        logFile.close()

    cur.close()
    con.close()

    print("Parsing the " + forum + " forum and data classification done.")
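
# A minimal usage sketch, assuming the database behind db_connection.py is reachable
# and the configured shared_folder already holds Forums\BestCardingWorld\HTML_Pages
# for the current date (the url below is only a placeholder for the forum's seed
# address stored with the records):
#
# if __name__ == '__main__':
#     new_parse(forum="BestCardingWorld",
#               url="http://bestcardingworld.example.onion/",
#               createLog=True)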