# This script is based on the calsyslab project.
__author__ = 'DarkWeb'

import codecs
import glob
import os
import re
import shutil
import traceback

from bs4 import BeautifulSoup

from Forums.DB_Connection.db_connection import *
from Forums.BestCardingWorld.parser import *
from Forums.Cardingleaks.parser import *
from Forums.CryptBB.parser import *
from Forums.OnniForums.parser import *
from Forums.Altenens.parser import *
from Forums.Procrax.parser import *
from Forums.Libre.parser import *
from Forums.HiddenAnswers.parser import *
from Forums.Classifier.classify_product import predict
# from DarkWebMining_Sample.Forums.Classifier.classify_product import predict_semi
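
# Overall flow: read the scraped Listing and Description html pages for CURRENT_DATE,
# parse each page with the forum-specific parser, merge paginated description pages into
# their first page, join each listing row with its matched description page, classify the
# topic text, persist the merged record, and move processed files into a 'Read' subfolder.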


# Determines if a forum is Russian. Not really used now, but may be later.
def isRussianForum(forum):

    with open('russian_forums.txt') as f:
        forums = f.readlines()

    result = False
    for iforum in forums:
        iforum = iforum.replace('\n', '')
        if iforum == forum:
            result = True
            break

    return result
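
# 'russian_forums.txt' is expected to contain one forum name per line; names are
# compared verbatim (after stripping the newline) against the 'forum' argument.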


# Tries to match description pages to listing pages by using a key made for every description
# page and every link in a listing page; once matched, the description info is merged into 'rec'.
# @param: rmm is the row of data parsed from the matched description page; rec is the row of data of an instance
# @return: rec, the row of data, with the description data added on after matching
def mergePages(rmm, rec):

    # key = u"Top:" + rec[1].upper().strip() + u" User:" + rec[5].upper().strip()
    # key = rec[16]

    print("----------------- Matched: " + rec[3] + "--------------------")

    rec[9] = rmm[1]
    rec[10] = rmm[2]
    rec[11] = rmm[3]
    rec[12] = rmm[4]
    rec[13] = rmm[5]
    rec[14] = rmm[6]
    rec[15] = rmm[7]
    rec[16] = rmm[8]

    return rec
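
# From the usage below, columns 9-16 of the listing record hold the description-page
# fields rmm[1]-rmm[8]; rec[3] holds the topic title and, after the merge, rec[14] holds
# the list of posts (the exact column layout is defined by the forum-specific parsers).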


# Takes a list of posts and joins them into one string, to be stored in the database as a single text field.
# @param: posts, a list of strings (the posts of a thread)
# @return: string containing the concatenation of all the posts
def getPosts(posts):

    strPosts = ' '.join(posts)
    return strPosts.strip()


# Uses the db connection module's methods to persist values to the correct tables.
# @param: url is the forum url; row is the list of entries for this instance; cur is the db cursor
def persist_data(url, row, cur):

    forum = create_forum(cur, row, url)

    board = create_board(cur, row, forum)

    author = create_user(cur, row, forum, 0)

    topic = create_topic(cur, row, forum, board, author)

    create_posts(cur, row, forum, board, topic)
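
# Note the dependency order: create_board receives the forum, create_topic the forum,
# board, and author, and create_posts the forum, board, and topic, so the calls must
# run in this sequence.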


# Main method for this program: gets the parsed info from the parsers and persists it into the db.
# Calls the different parser methods here depending on the type of html page.
def new_parse(forum, url, createLog):

    from Forums.Initialization.forums_mining import config, CURRENT_DATE

    print("Parsing the " + forum + " forum and conducting data classification to store the information in the database.")

    # ini = time.time()

    # Connecting to the database
    con = connectDataBase()
    cur = con.cursor()

    # Creating the tables (the database itself should be created manually)
    create_database(cur, con)

    nError = 0

    lines = []    # listing pages
    lns = []      # description pages
    detPage = {}  # first description pages
    other = {}    # other (follow-up) description pages

    # Creating the log file for each forum
    if createLog:
        if not os.path.exists("./" + forum + "/Logs/" + forum + "_" + CURRENT_DATE + ".log"):
            logFile = open("./" + forum + "/Logs/" + forum + "_" + CURRENT_DATE + ".log", "w")
        else:
            print("Files of the date " + CURRENT_DATE + " from the forum " + forum +
                  " were already read. Delete the referent information in the database and also delete the log file"
                  " in the _Logs folder to read files from this forum of this date again.")
            raise SystemExit

    mainDir = os.path.join(config.get('Project', 'shared_folder'), "Forums/" + forum + "/HTML_Pages")

    # Reading the listing html pages (os.path.join keeps the paths portable across platforms)
    for fileListing in glob.glob(os.path.join(mainDir, CURRENT_DATE, "Listing", '*.html')):
        lines.append(fileListing)

    # Reading the description html pages
    for fileDescription in glob.glob(os.path.join(mainDir, CURRENT_DATE, "Description", '*.html')):
        lns.append(fileDescription)

    # Parsing the description pages and putting each page's content into a dictionary (hash table)
    for index, line2 in enumerate(lns):

        print("Reading description folder of '" + forum + "', file '" + os.path.basename(line2) + "', index= " + str(index + 1) + " ... " + str(len(lns)))

        try:
            html = codecs.open(line2.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            # Fall back to the platform default encoding if utf8 fails
            try:
                html = open(line2.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                nError += 1
                print("There was a problem reading the file " + line2 + " in the Description section!")
                if createLog:
                    logFile.write(str(nError) + ". There was a problem reading the file " + line2 + " in the Description section!\n")
                continue

        try:
            if forum == "BestCardingWorld":
                rmm = bestcardingworld_description_parser(soup)
            elif forum == "Cardingleaks":
                rmm = cardingleaks_description_parser(soup)
            elif forum == "CryptBB":
                rmm = cryptBB_description_parser(soup)
            elif forum == "OnniForums":
                rmm = onniForums_description_parser(soup)
            elif forum == "Altenens":
                rmm = altenens_description_parser(soup)
            elif forum == "Procrax":
                rmm = procrax_description_parser(soup)
            elif forum == "Libre":
                rmm = libre_description_parser(soup)
            elif forum == "HiddenAnswers":
                rmm = HiddenAnswers_description_parser(soup)

            # key = u"Top:" + rmm[0].upper().strip() + u" User:" + rmm[2][0].upper().strip()
            key = u"Url:" + os.path.basename(line2).replace(".html", "")

            # Check if "page1" appears at the end of the key:
            # if yes, add the page to the first-page dictionary; if not, add it to the other-pages dictionary
            check = re.compile(r'page1$')
            if check.search(key):
                # print(key, 'is a first page\n')
                detPage[key] = {'rmm': rmm, 'files': [os.path.basename(line2)]}
            else:
                # print(key, 'is an other page\n')
                other[key] = {'rmm': rmm, 'filename': os.path.basename(line2)}

        except:
            nError += 1
            print("There was a problem parsing the file " + line2 + " in the Description section!")
            traceback.print_exc()
            if createLog:
                logFile.write(str(nError) + ". There was a problem parsing the file " + line2 + " in the Description section.\n")

    # Goes through the keys from detPage and other and checks if they match;
    # if yes, appends other[key]'s values to detPage[key] without overwriting
    for key in detPage.keys():
        for k in list(other.keys()):
            checkkey = str(key[4:])
            checkk = str(k[4:])
            if checkkey in checkk:
                detPage[key]['rmm'][1].extend(other[k]['rmm'][1])
                detPage[key]['rmm'][2].extend(other[k]['rmm'][2])
                detPage[key]['rmm'][3].extend(other[k]['rmm'][3])
                detPage[key]['rmm'][4].extend(other[k]['rmm'][4])
                detPage[key]['rmm'][5].extend(other[k]['rmm'][5])
                detPage[key]['rmm'][6].extend(other[k]['rmm'][6])
                detPage[key]['rmm'][7].extend(other[k]['rmm'][7])
                detPage[key]['rmm'][8].extend(other[k]['rmm'][8])
                detPage[key]['files'].append(other[k]['filename'])
                other.pop(k)

    # Parsing the listing pages and putting each page's content into a list
    for index, line1 in enumerate(lines):

        print("Reading listing folder of '" + forum + "', file '" + os.path.basename(line1) + "', index= " + str(index + 1) + " ... " + str(len(lines)))

        readError = False
        try:
            html = codecs.open(line1.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            try:
                html = open(line1.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                nError += 1
                print("There was a problem reading the file " + line1 + " in the Listing section!")
                if createLog:
                    logFile.write(str(nError) + ". There was a problem reading the file " + line1 + " in the Listing section.\n")
                readError = True

        if not readError:

            parseError = False
            try:
                if forum == "BestCardingWorld":
                    rw = bestcardingworld_listing_parser(soup)
                elif forum == "Cardingleaks":
                    rw = cardingleaks_listing_parser(soup)
                elif forum == "CryptBB":
                    rw = cryptBB_listing_parser(soup)
                elif forum == "OnniForums":
                    rw = onniForums_listing_parser(soup)
                elif forum == "Altenens":
                    rw = altenens_listing_parser(soup)
                elif forum == "Procrax":
                    rw = procrax_listing_parser(soup)
                elif forum == "Libre":
                    rw = libre_listing_parser(soup)
                elif forum == "HiddenAnswers":
                    rw = HiddenAnswers_listing_parser(soup)
            except:
                nError += 1
                print("There was a problem parsing the file " + line1 + " in the Listing section!")
                traceback.print_exc()
                if createLog:
                    logFile.write(
                        str(nError) + ". There was a problem parsing the file " + line1 + " in the Listing section.\n")
                parseError = True

            if not parseError:

                persistError = False
                moveError = False
                num_in_db = 0
                num_persisted_moved = 0

                for rec in rw:

                    rec = rec.split(',')
                    # print(rec)

                    # key = u"Top:" + rec[1].upper().strip() + u" User:" + rec[5].upper().strip()
                    key = u"Url:" + cleanLink(rec[6]) + "page1"
                    # print(key)

                    if key in detPage:

                        # Combining the information from the listing and description pages
                        rmm = detPage[key]['rmm']
                        rec = mergePages(rmm, rec)

                        # Appending the classification of the topic to the record
                        # if isRussianForum(forum):
                        #     rec.append(str(predict(rec[1], getPosts(rec[8]), language='sup_russian')))
                        # else:
                        #     rec.append(str(predict(rec[1], getPosts(rec[8]), language='sup_english')))
                        rec.append(str(predict(rec[3], getPosts(rec[14]), language='sup_english')))

                        # Persisting the information in the database
                        try:
                            persist_data(url, tuple(rec), cur)
                            con.commit()
                        except:
                            trace = traceback.format_exc()
                            if trace.find("already exists") == -1:
                                nError += 1
                                print("There was a problem persisting the files " + ', '.join(detPage[key]['files']) + " in the database!")
                                if createLog:
                                    logFile.write(
                                        str(nError) + ". There was a problem persisting the files " + ', '.join(detPage[key]['files']) + " in the database.\n")
                                persistError = True
                            con.rollback()

                        if not persistError:
                            # Move the description files to the 'Read' (completed) folder;
                            # all description files share one folder, so line2's directory is reused here
                            for filename in detPage[key]['files']:
                                source = line2.replace(os.path.basename(line2), "") + filename
                                destination = line2.replace(os.path.basename(line2), "") + r'Read/'
                                try:
                                    shutil.move(source, destination)
                                    num_persisted_moved += 1
                                except:
                                    print("There was a problem moving the file " + filename + " in the Description section!")
                                    nError += 1
                                    if createLog:
                                        logFile.write(
                                            str(nError) + ". There was a problem moving the file " + filename + " in the Description section.\n")
                                    moveError = True

                    # if the associated description page was not read or not parsed
                    else:
                        # query the database
                        # if the post already exists:
                        #     num_in_db += 1
                        pass

                # If the number of topics on the listing page equals the number of merged, persisted,
                # and moved topics plus the number of topics already in the database, the listing is complete
                if not persistError and not moveError and len(rw) == (num_persisted_moved + num_in_db):

                    # move the listing file to the 'Read' (completed) folder
                    source = line1
                    destination = line1.replace(os.path.basename(line1), "") + r'Read/'
                    try:
                        shutil.move(source, destination)
                    except:
                        nError += 1
                        print("There was a problem moving the file " + line1 + " in the Listing section!")
                        if createLog:
                            logFile.write(str(nError) + ". There was a problem moving the file " + line1 + " in the Listing section.\n")

    if createLog:
        logFile.close()

    # end = time.time()
    # finalTime = float(end - ini)
    # print(forum + " parsing performed successfully in %.2f" % finalTime + "!")

    input("Parsing the " + forum + " forum and data classification done successfully. Press ENTER to continue\n")
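
# Minimal usage sketch (assumption: this module is normally driven by
# Forums.Initialization.forums_mining, which supplies the forum name, url, and
# createLog flag; the url below is a hypothetical placeholder, not a real address):
#
# if __name__ == '__main__':
#     new_parse(forum='BestCardingWorld', url='http://example.onion', createLog=True)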