# This parser is based on the calsyslab project.
__author__ = 'DarkWeb'

import codecs
import glob
import os
import shutil
import traceback

from bs4 import BeautifulSoup

from Forums.DB_Connection.db_connection import *
from Forums.BestCardingWorld.parser import *
from Forums.CryptBB.parser import *
from Forums.Classifier.classify_product import predict
# from DarkWebMining_Sample.Forums.Classifier.classify_product import predict_semi


# determines if forum is russian, not really used now but maybe later
def isRussianForum(forum):

    with open('russian_forums.txt') as f:
        forums = f.readlines()

    result = False
    for iforum in forums:

        iforum = iforum.replace('\n', '')
        if iforum == forum:
            result = True
            break

    return result
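
# Example (hypothetical) contents of 'russian_forums.txt': judging from the code above,
# the file is assumed to list one forum name per line, matched exactly against the
# 'forum' argument. The names below are placeholders for illustration only:
#
#     SomeRussianForum
#     AnotherRussianForum
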
# tries to match description pages to listing pages by using a key made for every description page and every link in a listing page
# once verified and matched, the info is merged into a 'rec', which is returned
# @param: rmm is the row of data parsed from the matching description page, rec is the row of data of an instance from the listing page
# return: rec, row of data, that may have additional data added on after matching description to listing page
def mergePages(rmm, rec):

    # key = u"Top:" + rec[1].upper().strip() + u" User:" + rec[5].upper().strip()
    # key = rec[16]

    print("----------------- Matched: " + rec[3] + "--------------------")

    rec[9] = rmm[1]
    rec[10] = rmm[2]
    rec[11] = rmm[3]
    rec[12] = rmm[4]
    rec[13] = rmm[5]
    rec[14] = rmm[6]
    rec[15] = rmm[7]
    rec[16] = rmm[8]

    return rec
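
# Note (assumption inferred from the indices used in this file): each listing 'rec' is a
# comma-split list whose index 3 appears to be the topic title, and whose indices 9-16
# are filled in from the description record ('rmm') by mergePages; after the merge,
# index 14 holds the thread's posts, which getPosts() joins and predict() classifies.
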
# gets a list of posts and joins them together into one string to be put in the database as one string of text
# @param: list of strings (the posts of a thread)
# return: string containing the concatenation of all the strings
def getPosts(posts):

    strPosts = ' '.join(posts)
    return strPosts.strip()


# uses the db connection module's methods to persist values to the correct categories
# @param: row is the list of entries for this instance, cur is the db connection object
def persist_data(url, row, cur):

    forum = create_forum(cur, row, url)

    board = create_board(cur, row, forum)

    author = create_user(cur, row, forum, 0)

    topic = create_topic(cur, row, forum, board, author)

    create_posts(cur, row, forum, board, topic)
# main method for this program: gets the parsed info from the parsers and persists it into the db
# calls the different parser methods here depending on the type of html page
def new_parse(forum, url, createLog):

    from Forums.Initialization.forums_mining import CURRENT_DATE

    print("Parsing the " + forum + " forum and conducting data classification to store the information in the database.")

    # ini = time.time()

    # Connecting to the database
    con = connectDataBase()
    cur = con.cursor()

    # Creating the tables (the database should be created manually)
    create_database(cur, con)

    nError = 0

    lines = []    # listing pages
    lns = []      # description pages
    detPage = {}

    # Creating the log file for each forum
    if createLog:
        if not os.path.exists("./" + forum + "/Logs/" + forum + "_" + CURRENT_DATE + ".log"):
            logFile = open("./" + forum + "/Logs/" + forum + "_" + CURRENT_DATE + ".log", "w")
        else:
            print("Files of the date " + CURRENT_DATE + " from the forum " + forum +
                  " were already read. Delete the corresponding information in the database and also delete the log file"
                  " in the _Logs folder to read files from this forum of this date again.")
            raise SystemExit
    # Reading the listing HTML pages
    for fileListing in glob.glob(os.path.join("..\\" + forum + "\\HTML_Pages\\" + CURRENT_DATE + "\\Listing", '*.html')):
        lines.append(fileListing)

    # Reading the description HTML pages
    for fileDescription in glob.glob(os.path.join("..\\" + forum + "\\HTML_Pages\\" + CURRENT_DATE + "\\Description", '*.html')):
        lns.append(fileDescription)
    # Parsing the description pages and putting the tags' content into a dictionary (hash table)
    for index, line2 in enumerate(lns):

        print("Reading description folder of '" + forum + "', file '" + os.path.basename(line2) + "', index= " + str(index + 1) + " ... " + str(len(lns)))

        try:
            html = codecs.open(line2.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            try:
                html = open(line2.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                nError += 1
                print("There was a problem reading the file " + line2 + " in the Description section!")
                if createLog:
                    logFile.write(str(nError) + ". There was a problem reading the file " + line2 + " in the Description section!\n")
                continue

        try:
            if forum == "BestCardingWorld":
                rmm = bestcardingworld_description_parser(soup)
            elif forum == "CryptBB":
                rmm = cryptBB_description_parser(soup)

            # key = u"Top:" + rmm[0].upper().strip() + u" User:" + rmm[2][0].upper().strip()
            key = u"Url:" + os.path.basename(line2).replace(".html", "")

            # save file address with description record in memory
            detPage[key] = {'rmm': rmm, 'filename': os.path.basename(line2)}
        except:
            nError += 1
            print("There was a problem parsing the file " + line2 + " in the Description section!")
            if createLog:
                logFile.write(str(nError) + ". There was a problem parsing the file " + line2 + " in the Description section.\n")
    # Parsing the listing pages and putting the tags' content into a list
    for index, line1 in enumerate(lines):

        print("Reading listing folder of '" + forum + "', file '" + os.path.basename(line1) + "', index= " + str(index + 1) + " ... " + str(len(lines)))

        readError = False
        try:
            html = codecs.open(line1.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            try:
                html = open(line1.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                nError += 1
                print("There was a problem reading the file " + line1 + " in the Listing section!")
                if createLog:
                    logFile.write(str(nError) + ". There was a problem reading the file " + line1 + " in the Listing section.\n")
                readError = True

        if not readError:

            parseError = False
            try:
                if forum == "BestCardingWorld":
                    rw = bestcardingworld_listing_parser(soup)
                elif forum == "CryptBB":
                    rw = cryptBB_listing_parser(soup)
            except:
                nError += 1
                print("There was a problem parsing the file " + line1 + " in the Listing section!")
                traceback.print_exc()
                if createLog:
                    logFile.write(
                        str(nError) + ". There was a problem parsing the file " + line1 + " in the Listing section.\n")
                parseError = True

            if not parseError:

                persistError = False
                moveError = False
                num_in_db = 0
                num_persisted_moved = 0
                for rec in rw:

                    rec = rec.split(',')

                    # key = u"Top:" + rec[1].upper().strip() + u" User:" + rec[5].upper().strip()
                    # cleanLink is expected to come from the forum parser modules imported above
                    key = u"Url:" + cleanLink(rec[6])

                    if key in detPage:

                        # Combining the information from listing and description pages
                        rmm = detPage[key]['rmm']
                        rec = mergePages(rmm, rec)

                        # Append to the list the classification of the topic
                        # if isRussianForum(forum):
                        #     rec.append(str(predict(rec[1], getPosts(rec[8]), language='sup_russian')))
                        # else:
                        #     rec.append(str(predict(rec[1], getPosts(rec[8]), language='sup_english')))
                        rec.append(str(predict(rec[3], getPosts(rec[14]), language='sup_english')))

                        # Persisting the information in the database
                        try:
                            persist_data(url, tuple(rec), cur)
                            con.commit()
                        except:
                            trace = traceback.format_exc()

                            if trace.find("already exists") == -1:
                                nError += 1
                                print("There was a problem persisting the file " + detPage[key]['filename'] + " in the database!")
                                if createLog:
                                    logFile.write(
                                        str(nError) + ". There was a problem persisting the file " + detPage[key]['filename'] + " in the database.\n")
                                persistError = True

                            con.rollback()

                        if not persistError:

                            # move the description file to the completed ('Read') folder
                            # note: line2 still points at the last description file, so this resolves to the Description folder
                            source = line2.replace(os.path.basename(line2), "") + detPage[key]['filename']
                            destination = line2.replace(os.path.basename(line2), "") + r'Read/'

                            try:
                                shutil.move(source, destination)
                                num_persisted_moved += 1
                            except:
                                print("There was a problem moving the file " + detPage[key]['filename'] + " in the Description section!")
                                nError += 1
                                if createLog:
                                    logFile.write(
                                        str(nError) + ". There was a problem moving the file " + detPage[key]['filename'] + " in the Description section.\n")
                                moveError = True

                    # if the associated description page was not read or not parsed
                    else:
                        # query the database
                        # if the post already exists:
                        #     num_in_db += 1
                        pass
                # if the number of topics on the listing page is equal to
                # the number of merged, persisted, and moved topics plus
                # the number of topics already in the database
                if not persistError and not moveError and len(rw) == (num_persisted_moved + num_in_db):

                    # move the listing file to the completed ('Read') folder
                    source = line1
                    destination = line1.replace(os.path.basename(line1), "") + r'Read/'

                    try:
                        shutil.move(source, destination)
                    except:
                        nError += 1
                        print("There was a problem moving the file " + line1 + " in the Listing section!")
                        if createLog:
                            logFile.write(str(nError) + ". There was a problem moving the file " + line1 + " in the Listing section.\n")

    if createLog:
        logFile.close()

    # end = time.time()
    # finalTime = float(end - ini)
    # print(forum + " Parsing Performed Successfully in %.2f" % finalTime + "!")

    input("Parsing the " + forum + " forum and data classification done successfully. Press ENTER to continue\n")
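
# A minimal, hypothetical invocation sketch (not part of the original pipeline, which
# presumably drives new_parse from Forums/Initialization/forums_mining.py after the
# crawler has saved the HTML pages for CURRENT_DATE). The forum name matches one of
# the branches handled above; the URL is a placeholder for illustration only.
if __name__ == '__main__':
    new_parse(forum="BestCardingWorld", url="http://example.onion/forum/", createLog=True)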