This is based on the CalSysLab project.
__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# re is used below for the CVE/MS regular-expression searches
import re

# parses description pages: takes the html of a description page as a soup object and extracts the info it needs
# the scraped fields are collected and returned together after being organized
# @param: soup object looking at the html page of a description page
# @return: 'row', a tuple holding every field scraped from the description page
def nexus_description_parser(soup):
    # Fields to be parsed
    vendor = "-1"  # 0 *Vendor_Name
    success = "-1"  # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"  # 2 Vendor_Rating
    name = "-1"  # 3 *Product_Name
    describe = "-1"  # 4 Product_Description
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"  # 7 Product_Category
    views = "-1"  # 8 Product_Number_Of_Views
    reviews = "-1"  # 9 Product_Number_Of_Reviews
    rating_item = "-1"  # 10 Product_Rating
    addDate = "-1"  # 11 Product_AddedDate
    BTC = "-1"  # 12 Product_BTC_SellingPrice
    USD = "-1"  # 13 Product_USD_SellingPrice
    EURO = "-1"  # 14 Product_EURO_SellingPrice
    sold = "-1"  # 15 Product_QuantitySold
    left = "-1"  # 16 Product_QuantityLeft
    shipFrom = "-1"  # 17 Product_ShippedFrom
    shipTo = "-1"  # 18 Product_ShippedTo
    image = "-1"  # 19 Product_Image
    vendor_image = "-1"  # 20 Vendor_Image
    divmd7 = soup.find('div', {'class': "col-md-7"})

    # Finding Vendor
    vendor = divmd7.find('a').text.strip()

    # Finding Prices
    USD = soup.find('span', {'class': "total"}).text.strip()
    tempBTC = soup.find('div', {'class': "text-center"}).text.strip()
    BTC = tempBTC.replace("BTC", "").strip()

    # Finding Product Image
    image = soup.find('img', {'class': 'img-fluid'})
    image = image.get('src')
    image = image.split('base64,')[-1]

    # Finding the Product description
    describe = soup.find('div', {'class': "text-white"}).text
    describe = cleanString(describe.strip())
    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')
    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
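
# Illustrative usage sketch (not part of the original CalSysLab code): one way the
# description parser above could be exercised against a locally saved HTML page.
# The file name is hypothetical.
def _example_run_description_parser(html_path="nexus_description_sample.html"):
    with open(html_path, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    row = nexus_description_parser(soup)
    # By the index comments above, row[0] is the vendor name and row[13] the USD price
    return row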

# parses listing pages: takes the html of a listing page as a soup object and extracts the info it needs
# the info for each product is appended to parallel lists, which are organized and returned at the end
# @param: soup object looking at the html page of a listing page
# @return: 'row' built by organizeProducts, containing the lists of info scraped from the listing page
def nexus_listing_parser(soup):
    # Fields to be parsed
    nm = 0  # *Total_Products (Should be Integer)
    mktName = "Nexus"  # 0 *Marketplace_Name
    vendor = []  # 1 *Vendor y
    rating_vendor = []  # 2 Vendor_Rating
    success = []  # 3 Vendor_Successful_Transactions
    name = []  # 4 *Product_Name y
    CVE = []  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []  # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []  # 7 Product_Category y
    describe = []  # 8 Product_Description
    views = []  # 9 Product_Number_Of_Views
    reviews = []  # 10 Product_Number_Of_Reviews
    rating_item = []  # 11 Product_Rating
    addDate = []  # 12 Product_AddDate
    BTC = []  # 13 Product_BTC_SellingPrice
    USD = []  # 14 Product_USD_SellingPrice y
    EURO = []  # 15 Product_EURO_SellingPrice
    sold = []  # 16 Product_QuantitySold
    qLeft = []  # 17 Product_QuantityLeft
    shipFrom = []  # 18 Product_ShippedFrom
    shipTo = []  # 19 Product_ShippedTo
    image = []  # 20 Product_Image
    image_vendor = []  # 21 Vendor_Image
    href = []  # 22 Product_Links

    listing = soup.findAll('div', {"class": "p-4"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

        # Category
        tempCategory = soup.find('select', {"name": "category"})
        tempCategory = tempCategory.find('option', selected=True).text.strip()
        category.append(tempCategory)

        # Product Name
        product = a.find('h4', {"class": "text-center"}).text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # USD and BTC Price
        price = a.find('div', {"class": "price"}).text
        if "~" in price:
            tempUSD = price.split("~")[0]
            tempUSD = tempUSD.replace("$", "")
            tempUSD = tempUSD.strip()
            USD.append(tempUSD)

            tempBTC = price.split("~")[1]
            tempBTC = tempBTC.replace("BTC", "")
            tempBTC = tempBTC.strip()
            BTC.append(tempBTC)
        else:
            USD.append("-1")
            BTC.append("-1")

        # Product Image (keeps only the base64 payload of the inline data URI)
        product_image = a.find('img')
        product_image = product_image.get('src')
        product_image = product_image.split('base64,')[-1]
        image.append(product_image)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
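
# Illustrative usage sketch (assumption, not part of the original code): how the
# listing parser might be driven from a locally saved listing page. The file name
# is hypothetical; the return value is whatever organizeProducts (imported from
# MarketPlaces.Utilities.utilities) builds from the parallel lists above.
def _example_run_listing_parser(html_path="nexus_listing_sample.html"):
    with open(html_path, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    return nexus_listing_parser(soup)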

# called by the crawler to get the description links on a listing page
# @param: soup object looking at the html page of a listing page
# @return: list of description links found on the listing page
def nexus_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('div', {"class": "p-4"})

    for a in listing:
        bae = a.find('a', href=True)
        link = bae['href']
        href.append(link)

    return href
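
# Illustrative sketch (assumption, not in the original project): exercising the links
# parser on a locally saved listing page. The file name is hypothetical.
if __name__ == '__main__':
    with open("nexus_listing_sample.html", "r", encoding="utf-8") as f:
        listing_soup = BeautifulSoup(f.read(), "html.parser")
    print("description links found:", nexus_links_parser(listing_soup))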