# This script is based on the calsyslab project.
__author__ = 'DarkWeb'

import codecs
import glob
import os, re
import shutil
import time
import traceback

from bs4 import BeautifulSoup
from psycopg2.extras import RealDictCursor

from Forums.DB_Connection.db_connection import *
from Forums.BestCardingWorld.parser import *
from Forums.CryptBB.parser import *
from Forums.Incogsnoo.parser import *
from Forums.Classifier.classify_product import predict
# from DarkWebMining_Sample.Forums.Classifier.classify_product import predict_semi

# controls the log id
nError = 0
# determines whether a forum is Russian; not really used now, but may be later
def isRussianForum(forum):

    with open('russian_forums.txt') as f:
        forums = f.readlines()

    result = False
    for iforum in forums:

        iforum = iforum.replace('\n', '')
        if iforum == forum:
            result = True
            break

    return result
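
# Note: russian_forums.txt is assumed to live in the working directory and to
# contain one forum name per line, matching the names passed to new_parse().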

# tries to match description pages to listing pages by using a key made for every description page and every link in a listing page;
# once verified and matched, the info is merged into 'rec', which is returned
# @param: rmm is the row of data parsed from a description page, rec is the row of data of an instance from a listing page
# @return: rec, the row of data, possibly with additional data added on after matching the description to the listing page
def mergePages(rmm, rec):

    # key = u"Top:" + rec[1].upper().strip() + u" User:" + rec[5].upper().strip()
    # key = rec[16]

    print("----------------- Matched: " + rec[3] + " --------------------")

    if rmm[9] != "-1":  # image_user
        rec[9] = rmm[9]

    rec[10] = rmm[1]
    rec[11] = rmm[2]
    rec[12] = rmm[3]
    rec[13] = rmm[4]
    rec[14] = rmm[5]
    rec[15] = rmm[6]
    rec[16] = rmm[7]
    rec[17] = rmm[8]
    rec[18] = rmm[10]

    return rec
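
# Illustrative only: the 20-column rec / 11-column rmm layouts below are inferred
# from the index arithmetic above, not from a documented schema.
#   rec = ['-1'] * 20          # listing row (rec[3] = topic title, rec[6] = topic link)
#   rmm = ['-1'] * 11          # description row (rmm[9] = image_user)
#   rec = mergePages(rmm, rec) # copies rmm[1:9] and rmm[10] into rec[10:19]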

# takes a list of posts and joins them together into one string to be put in the database as a single block of text
# @param: posts, a list of strings (the posts of a thread)
# @return: a string containing the concatenation of all the posts
def getPosts(posts):

    strPosts = ' '.join(posts)
    return strPosts.strip()
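
# Example: getPosts(['first post', 'second post ']) returns 'first post second post'.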

# uses the db connection and helper methods to persist values to the correct tables
# @param: url is the forum url, row is the list of entries for this instance, cur is the db cursor
def persist_data(url, row, cur):

    forum = create_forum(cur, row, url)

    author = create_author(cur, row, forum)

    topic = create_topic(cur, forum, row, author)

    create_posts(cur, row, forum, topic)
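
# create_forum, create_author, create_topic, and create_posts are presumably
# provided by the wildcard import from Forums.DB_Connection.db_connection above.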

def incrementError():
    global nError
    nError += 1

def read_file(filePath, createLog, logFile):
    try:
        html = codecs.open(filePath.strip('\n'), encoding='utf8')
        soup = BeautifulSoup(html, "html.parser")
        html.close()
        time.sleep(0.01)  # making sure the file is closed before returning the soup object
        return soup
    except:
        try:
            html = open(filePath.strip('\n'))
            soup = BeautifulSoup(html, "html.parser")
            html.close()
            time.sleep(0.01)  # making sure the file is closed before returning the soup object
            return soup
        except:
            incrementError()
            print("There was a problem reading the file " + filePath)
            if createLog:
                logFile.write(
                    str(nError) + ". There was a problem reading the file " + filePath + "\n"
                    + traceback.format_exc() + "\n")
            return None
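
# Note: files are first opened as utf-8 via codecs.open; on failure, the plain
# open() fallback retries with the platform's default encoding.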

def parse_listing(forum, listingFile, soup, createLog, logFile):
    try:
        if forum == "BestCardingWorld":
            rw = bestcardingworld_listing_parser(soup)
        elif forum == "CryptBB":
            rw = cryptBB_listing_parser(soup)
        elif forum == "Incogsnoo":
            rw = incogsnoo_listing_parser(soup)
        else:
            print("MISSING CALL TO LISTING PARSER IN PREPARE_PARSER.PY!")
            raise Exception
        return rw
    except:
        incrementError()
        print("There was a problem parsing the file " + listingFile + " in the Listing section!")
        traceback.print_exc()
        if createLog:
            logFile.write(
                str(nError) + ". There was a problem parsing the file " + listingFile + " in the Listing section.\n"
                + traceback.format_exc() + "\n")
        return None
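
# Note: each element of the returned rw is expected to be one comma-joined string
# of listing fields; new_parse() splits it with rec.split(',') before matching
# description pages.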

def parse_description(forum, descriptionFile, soup, createLog, logFile):
    try:
        if forum == "BestCardingWorld":
            rmm = bestcardingworld_description_parser(soup)
        elif forum == "CryptBB":
            rmm = cryptBB_description_parser(soup)
        elif forum == "Incogsnoo":
            rmm = incogsnoo_description_parser(soup)
        else:
            print("MISSING CALL TO DESCRIPTION PARSER IN PREPARE_PARSER.PY!")
            raise Exception
        return rmm
    except:
        incrementError()
        print("There was a problem parsing the file " + descriptionFile + " in the Description section!")
        traceback.print_exc()
        if createLog:
            logFile.write(
                str(nError) + ". There was a problem parsing the file " + descriptionFile + " in the Description section.\n"
                + traceback.format_exc() + "\n")
        return None
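
# Note: the rmm row returned here feeds mergePages(); its column layout is assumed
# to be consistent across the three forum-specific description parsers.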

def persist_record(url, rec, cur, con, createLog, logFile, listingFile, descriptionFile):
    try:
        persist_data(url, tuple(rec), cur)
        con.commit()
        return True
    except:
        con.rollback()
        incrementError()
        print(f"There was a problem persisting the files ({listingFile} + {descriptionFile}) in the database!")
        traceback.print_exc()
        if createLog:
            logFile.write(
                str(nError) + f". There was a problem persisting the files ({listingFile} + {descriptionFile}) in the database!\n"
                + traceback.format_exc() + "\n")
        return False
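
# Each record is committed individually, so a failed record only rolls back its
# own inserts and does not affect previously persisted records.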

def move_file(filePath, createLog, logFile):

    source = filePath
    destination = filePath.replace(os.path.basename(filePath), "") + 'Read\\' + os.path.basename(filePath)

    try:
        # copy_function=shutil.copy2 preserves file metadata if the move falls back to a copy
        shutil.move(source, destination, shutil.copy2)
        return True
    except:
        try:
            # retry with shutil.copytree as the copy function, in case the path is a directory
            shutil.move(source, destination, shutil.copytree)
            return True
        except:
            incrementError()
            print("There was a problem moving the file " + filePath)
            traceback.print_exc()
            if createLog:
                logFile.write(
                    str(nError) + ". There was a problem moving the file " + filePath + "\n"
                    + traceback.format_exc() + "\n")
            return False
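
# Note: the destination is a 'Read' subfolder next to the source file; the
# backslash separator assumes a Windows shared folder, like the paths in new_parse().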

# main method for this program: it gets the parsed info from the parsers and persists it into the db
# calls the different parser methods depending on the type of html page
def new_parse(forum, url, createLog):

    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    global nError
    nError = 0

    print("Parsing the " + forum + " forum and conducting data classification to store the information in the database.")

    # Connecting to the database
    con = connectDataBase()
    cur = con.cursor(cursor_factory=RealDictCursor)

    # Creating the tables (the database itself should be created manually)
    create_database(cur, con)

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums\\" + forum + "\\HTML_Pages")

    # Creating the log file for each forum
    if createLog:
        try:
            logFile = open(mainDir + f"/{CURRENT_DATE}/" + forum + "_" + CURRENT_DATE + ".log", "w")
        except:
            print("Could not open log file!")
            createLog = False
            logFile = None
            # raise SystemExit
    else:
        logFile = None

    # Reading the listing html pages
    listings = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Listing", '*.html'))
    listings.sort(key=os.path.getmtime)

    for listingIndex, listingFile in enumerate(listings):

        print("Reading listing folder of '" + forum + "', file '" + os.path.basename(listingFile) + "', index= " + str(
            listingIndex + 1) + " ... " + str(len(listings)))

        listingSoup = read_file(listingFile, createLog, logFile)

        # listing flags: each error flag records a failure in one stage of the pipeline;
        # the listing file is only moved to the completed folder if none of them is set
        doParseListing = listingSoup is not None
        doDescription = False

        readDescriptionError = False
        parseDescriptionError = False
        persistDescriptionError = False
        moveDescriptionError = False
        findDescriptionError = False

        rw = []

        if doParseListing:

            rw = parse_listing(forum, listingFile, listingSoup, createLog, logFile)

            doDescription = rw is not None

        if doDescription:

            nFound = 0

            for rec in rw:

                rec = rec.split(',')

                descriptionPattern = cleanLink(rec[6]) + "page[0-9]*.html"

                # Reading the associated description html pages
                descriptions = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Description", descriptionPattern))
                descriptions.sort(key=os.path.getmtime)

                nFound += len(descriptions)

                # Aggregate of posts from multiple description (topic) pages
                posts = []

                for descriptionIndex, descriptionFile in enumerate(descriptions):

                    print("Reading description folder of '" + forum + "', file '" + os.path.basename(
                        descriptionFile) + "', index= " + str(descriptionIndex + 1) + " ... " + str(len(descriptions)))

                    descriptionSoup = read_file(descriptionFile, createLog, logFile)

                    # description flags
                    doParseDescription = descriptionSoup is not None
                    doPersistRecord = False
                    doMoveDescription = False

                    rmm = []

                    if doParseDescription:

                        rmm = parse_description(forum, descriptionFile, descriptionSoup, createLog, logFile)

                        doPersistRecord = rmm is not None

                    else:
                        readDescriptionError = True
                        parseDescriptionError = True

                    if doPersistRecord:

                        # Combining the information from the listing and description pages
                        rec = mergePages(rmm, rec)

                        # Add this page's posts to the aggregate
                        posts += rec[15]

                        # Classify on the final description page only, so the topic
                        # classification is based on all posts from all pages
                        if descriptionIndex == len(descriptions) - 1:

                            rec[19] = str(predict(rec[3], getPosts(posts), language='sup_english'))

                            # Persisting the information in the database
                            persistSuccess = persist_record(url, rec, cur, con, createLog, logFile, listingFile,
                                                            descriptionFile)

                            doMoveDescription = persistSuccess

                    else:
                        parseDescriptionError = True

                    if doMoveDescription:

                        # move the description file to the completed folder
                        moveSuccess = move_file(descriptionFile, createLog, logFile)

                        if not moveSuccess:
                            moveDescriptionError = True

                    else:
                        moveDescriptionError = True

            if not (nFound > 0):

                findDescriptionError = True

                incrementError()
                print(f"There was a problem locating the file(s) for {listingFile} in the Description section!")
                if createLog:
                    logFile.write(
                        str(nError) + f". There was a problem locating the file(s) for {listingFile}"
                                      f" in the Description section!\n\n")

        # move the listing file to the completed folder only if every stage above succeeded
        if not (readDescriptionError or parseDescriptionError or persistDescriptionError
                or moveDescriptionError or findDescriptionError):

            move_file(listingFile, createLog, logFile)

    # registering the current forum status (up/down) and the number of scraped pages in the database
    forumId = verifyForum(cur, forum)
    if forumId > 0:

        readListings = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Listing\\read", '*.html'))
        readDescriptions = glob.glob(os.path.join(mainDir, CURRENT_DATE + "\\Description\\read", '*.html'))

        create_status(cur, forumId, CURRENT_DATE, len(readListings), len(readDescriptions), '1' if len(listings) > 0 else '0')
        con.commit()

    if createLog:
        logFile.close()

    cur.close()
    con.close()

    print("Parsing the " + forum + " forum and data classification done.")
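
if __name__ == "__main__":
    # Minimal smoke test of the pure helpers above; a sketch only, runnable
    # without any database or HTML pages. The 20/11-column row layouts are
    # assumptions inferred from mergePages' indexing, not a documented schema.
    demoRmm = [str(i) for i in range(11)]  # hypothetical description row
    demoRec = ['-1'] * 20                  # hypothetical listing row
    demoRec[3] = 'demo topic'
    demoRec = mergePages(demoRmm, demoRec)
    assert demoRec[10] == '1' and demoRec[18] == '10'
    assert getPosts(['first post', 'second post']) == 'first post second post'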