__author__ = 'DarkWeb'

import glob
import os
import codecs
import shutil
import traceback
from datetime import date

from bs4 import BeautifulSoup
from MarketPlaces.DB_Connection.db_connection import *
from MarketPlaces.DarkFox.parser import *
from MarketPlaces.Tor2door.parser import *
from MarketPlaces.Classifier.classify_product import predict


def mergePages(rmm, rec):

    # key = u"Pr:" + rec[1].upper() + u" Vendor:" + rec[18].upper()
    # key = rec[23]

    print("----------------- Matched: " + rec[1] + "--------------------")

    # rec[1] is always overwritten with the name parsed from the description page
    # if rec[1] == "-1":            # Item_Name
    #     rec[1] = rmm[0]
    rec[1] = rmm[0]
    if rec[2] == "-1":              # Item_CVE_Classification
        rec[2] = rmm[4]
    if rec[3] == "-1":              # Item_MS_Classification
        rec[3] = rmm[5]
    if rec[4] == "-1":              # Item_MarketCategory
        rec[4] = rmm[7]
    if rec[5] == "-1":              # Item_Description
        rec[5] = rmm[1]
    elif rmm[1] != "-1":
        rec[5] = rec[5] + " " + rmm[1]
    if rec[6] == "-1":              # Item_EscrowInfo
        rec[6] = rmm[11]
    # rec[7] = "-1"                 # Item_N.OfViews
    if rec[8] == "-1":              # Item_Reviews
        rec[8] = rmm[6]
    if rec[9] == "-1":              # Item_AddedDate
        rec[9] = rmm[15]
    if rec[10] == "-1":             # Item_LastViewedDate
        rec[10] = rmm[2]
    if rec[11] == "-1":             # Item_BTC_SellingPrice
        rec[11] = rmm[18]
    if rec[12] == "-1":             # Item_US_SellingPrice
        rec[12] = rmm[19]
    if rec[13] == "-1":             # Item_EURO_SellingPrice
        rec[13] = rmm[22]
    if rec[14] == "-1":             # Item_QuantitySold
        rec[14] = rmm[14]
    if rec[15] == "-1":             # Item_QuantityLeft
        rec[15] = rmm[10]
    if rec[16] == "-1":             # Item_ShippedFrom
        rec[16] = rmm[8]
    if rec[17] == "-1":             # Item_ShippedTo
        rec[17] = rmm[9]
    if rec[18] == "-1":             # Vendor_Name
        rec[18] = rmm[13]
    if rec[19] == "-1":             # Vendor_Rating
        rec[19] = rmm[20]
    if rec[20] == "-1":             # Vendor_Successful_Transactions
        rec[20] = rmm[21]
    if rec[21] == "-1":             # Vendor_TermsAndConditions
        rec[21] = rmm[12]

    # rec[?] = rmm[17]              # Item_EndDate
    # rec[?] = rmm[?]               # Item_Feedback
    # rec[?] = rmm[?]               # Shipping_Options
    # rec[?] = rmm[?]               # Average_DeliveryTime

    return rec


def persist_data(row, cur):

    marketPlace = create_marketPlace(cur, row)

    vendor = create_vendor(cur, row)

    create_items(cur, row, marketPlace, vendor)
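

# Illustrative sketch only (not called by the pipeline): mergePages fills the
# "-1" placeholders of a listing record (rec) with the values parsed from the
# matching description page (rmm). The field counts below are assumptions
# inferred from the indices accessed above, not a documented schema.
def _mergePages_example():
    rmm = ["Example Item"] + ["-1"] * 22   # description record (23 fields assumed)
    rec = ["-1"] * 24                      # listing record (24 fields assumed)
    merged = mergePages(rmm, rec)
    assert merged[1] == "Example Item"     # rec[1] is always taken from rmm[0]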


def new_parse(marketPlace, createLog):

    print("Parsing the " + marketPlace + " marketplace and conducting data classification to store the information in the database.")

    crawlerDate = date.today()

    # ini = time.time()

    global site

    # Connecting to the database
    con = connectDataBase()
    cur = con.cursor()

    # Creating the tables (the database itself should be created manually)
    create_database(cur, con)

    nError = 0

    lines = []    # listing pages
    lns = []      # description pages
    detPage = {}  # parsed description records, keyed by sanitized URL
    rw = []

    dateStr = str("%02d" % crawlerDate.month) + str("%02d" % crawlerDate.day) + str("%04d" % crawlerDate.year)

    # Creating the log file for each marketplace
    if createLog:
        logName = "./" + marketPlace + "/Logs/" + marketPlace + "_" + dateStr + ".log"
        if not os.path.exists(logName):
            logFile = open(logName, "w")
        else:
            print("Files of the date " + str("%02d" % crawlerDate.month) + "/" + str("%02d" % crawlerDate.day) + "/" +
                  str("%04d" % crawlerDate.year) + " from the marketplace " + marketPlace +
                  " were already read. Delete the corresponding information in the database and the log file "
                  "in the Logs folder to read files from this marketplace for this date again.")
            raise SystemExit

    pageDir = os.getcwd().replace("Initialization", "") + marketPlace + "\\HTML_Pages\\" + dateStr

    # Reading the Listing HTML pages
    for fileListing in glob.glob(os.path.join(pageDir + "\\Listing", '*.html')):
        lines.append(fileListing)

    # Reading the Description HTML pages
    for fileDescription in glob.glob(os.path.join(pageDir + "\\Description", '*.html')):
        lns.append(fileDescription)

    # Parsing the Description pages and putting the tag's content into a dictionary (hash table)
    for index, line2 in enumerate(lns):

        print("Reading description folder of '" + marketPlace + "', file '" + os.path.basename(line2) +
              "', index= " + str(index + 1) + " ... " + str(len(lns)))

        try:
            html = codecs.open(line2.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except Exception:
            try:
                html = open(line2.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except Exception:
                nError += 1
                print("There was a problem reading the file " + line2 + " in the Description section!")
                if createLog:
                    logFile.write(str(nError) + ". There was a problem reading the file " + line2 + " in the Description section.\n")
                continue

        try:
            if marketPlace == "DarkFox":
                rmm = darkfox_description_parser(soup)
            elif marketPlace == "Tor2door":
                rmm = tor2door_description_parser(soup)

            # key = u"Pr:" + rmm[0].upper()[:desc_lim1] + u" Vendor:" + rmm[13].upper()[:desc_lim2]
            key = u"Url:" + os.path.basename(line2).replace(".html", "")

            # save the file path along with the description record in memory
            detPage[key] = {'rmm': rmm, 'filename': os.path.basename(line2), 'source': line2}
        except Exception:
            nError += 1
            print("There was a problem parsing the file " + line2 + " in the Description section!")
            if createLog:
                logFile.write(str(nError) + ". There was a problem parsing the file " + line2 + " in the Description section.\n")
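
    # Note on matching: the description key above is the page's file name
    # (without ".html"), while the listing loop below rebuilds the key by
    # stripping every non-alphanumeric character from the product URL in
    # rec[23]. The two only match if the crawler saved each description page
    # under the sanitized URL. Illustrative (hypothetical) values:
    #   rec[23] = "http://market.onion/item/123"
    #   url     = "httpmarketonionitem123"
    #   key     = "Url:httpmarketonionitem123"  ->  file "httpmarketonionitem123.html"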

    # Parsing the Listing pages and putting the tag's content into a list
    for index, line1 in enumerate(lines):

        print("Reading listing folder of '" + marketPlace + "', file '" + os.path.basename(line1) +
              "', index= " + str(index + 1) + " ... " + str(len(lines)))

        readError = False
        try:
            html = codecs.open(line1.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except Exception:
            try:
                html = open(line1.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except Exception:
                nError += 1
                print("There was a problem reading the file " + line1 + " in the Listing section!")
                if createLog:
                    logFile.write(str(nError) + ". There was a problem reading the file " + line1 + " in the Listing section.\n")
                readError = True

        if not readError:

            parseError = False
            try:
                if marketPlace == "DarkFox":
                    rw = darkfox_listing_parser(soup)
                elif marketPlace == "Tor2door":
                    rw = tor2door_listing_parser(soup)
                else:
                    parseError = True
            except Exception:
                nError += 1
                print("There was a problem parsing the file " + line1 + " in the Listing section!")
                if createLog:
                    logFile.write(str(nError) + ". There was a problem parsing the file " + line1 + " in the Listing section.\n")
                parseError = True

            if not parseError:

                persistError = False
                moveError = False
                num_in_db = 0
                num_persisted_moved = 0

                for rec in rw:

                    rec = rec.split(',')

                    # if len(detPage) > 0:  # It was created here just because the Zeroday market does not have Description pages
                    # key = rec[23]
                    # key = u"Pr:" + rec[1].upper()[:list_lim1] + u" Vendor:" + rec[18].upper()[:list_lim2]
                    # key = u"Pr:" + rec[1].upper()

                    url = ''.join(e for e in rec[23] if e.isalnum())
                    key = u"Url:" + url

                    # if the associated description page was parsed
                    if key in detPage:

                        # Combining the information from the Listing and Description pages
                        rmm = detPage[key]['rmm']
                        rec = mergePages(rmm, rec)

                        # Append the classification of the product to the record
                        # rec.append(str(predict(rec[1], rec[5], language='markets')))
                        rec.append(str(predict(rec[1], rec[5], language='sup_english')))

                        # Persisting the information in the database
                        try:
                            persist_data(tuple(rec), cur)
                            con.commit()
                        except Exception:
                            trace = traceback.format_exc()
                            if trace.find("already exists") == -1:
                                nError += 1
                                print("There was a problem persisting the file " + detPage[key]['filename'] + " in the database!")
                                if createLog:
                                    logFile.write(str(nError) + ". There was a problem persisting the file " +
                                                  detPage[key]['filename'] + " in the database.\n")
                                persistError = True
                            con.rollback()

                        if not persistError:

                            # move the description file to the completed (Read) folder
                            source = detPage[key]['source']
                            destination = source.replace(os.path.basename(source), "") + r'Read/'

                            try:
                                shutil.move(source, destination)
                                num_persisted_moved += 1
                            except Exception:
                                nError += 1
                                print("There was a problem moving the file " + detPage[key]['filename'] + " in the Description section!")
                                if createLog:
                                    logFile.write(str(nError) + ". There was a problem moving the file " +
                                                  detPage[key]['filename'] + " in the Description section.\n")
                                moveError = True

                    # if the associated description page was not read or not parsed
                    else:
                        # query the database
                        # if the product already exists:
                        #     num_in_db += 1
                        pass

                # if the number of products on the listing page is equal to
                # the number of merged, persisted, and moved products plus
                # the number of products already in the database
                if not persistError and not moveError and len(rw) == (num_persisted_moved + num_in_db):

                    # move the listing file to the completed (Read) folder
                    source = line1
                    destination = line1.replace(os.path.basename(line1), "") + r'Read/'

                    try:
                        shutil.move(source, destination)
                    except Exception:
                        nError += 1
                        print("There was a problem moving the file " + line1 + " in the Listing section!")
                        if createLog:
                            logFile.write(str(nError) + ". There was a problem moving the file " + line1 + " in the Listing section.\n")

    if createLog:
        logFile.close()

    # end = time.time()
    # finalTime = float(end - ini)
    # print(marketPlace + " Parsing Performed Successfully in %.2f" % finalTime + "!")

    input("Parsing of the " + marketPlace + " marketplace and data classification completed successfully. Press ENTER to continue\n")
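

# Minimal usage sketch, not the project's actual entry point (an assumption;
# the real driver likely calls new_parse from elsewhere). Shown here only to
# illustrate the expected arguments.
if __name__ == '__main__':
    new_parse("DarkFox", createLog=True)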