__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# re is used below for the CVE/MS regular-expression searches
import re


# parses description pages, so takes html pages of description pages using soup object, and parses it for info it needs
# stores info it needs in different lists, these lists are returned after being organized
# @param: soup object looking at html page of description page
# return: 'row' that contains a variety of lists that each hold info on the description page
def metaversemarket_description_parser(soup):
    # Fields to be parsed

    vendor = "-1"            # 0 *Vendor_Name
    success = "-1"           # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"     # 2 Vendor_Rating
    name = "-1"              # 3 *Product_Name
    describe = "-1"          # 4 Product_Description
    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"          # 7 Product_Category
    views = "-1"             # 8 Product_Number_Of_Views
    reviews = "-1"           # 9 Product_Number_Of_Reviews
    rating_item = "-1"       # 10 Product_Rating
    addDate = "-1"           # 11 Product_AddedDate
    BTC = "-1"               # 12 Product_BTC_SellingPrice
    USD = "-1"               # 13 Product_USD_SellingPrice
    EURO = "-1"              # 14 Product_EURO_SellingPrice
    sold = "-1"              # 15 Product_QuantitySold
    left = "-1"              # 16 Product_QuantityLeft
    shipFrom = "-1"          # 17 Product_ShippedFrom
    shipTo = "-1"            # 18 Product_ShippedTo
    image = "-1"             # 19 Product_Image
    vendor_image = "-1"      # 20 Vendor_Image

    # Finding Product Name
    name = soup.find('div', {'class': "panel-heading"}).text
    name = cleanString(name.strip())

    # Finding Vendor
    temp = soup.findAll('div', {'class': "col-xs-12 col-sm-6 mt-5"})
    temp = temp[1].findAll('span')
    vendor = temp[1].find('b').text
    vendor = cleanString(vendor.strip())

    # Finding Product Reviews
    reviews = soup.find('span', {'class': "badge bg-success fs-12px"}).text.strip()

    # Finding Successful Transactions
    # NA

    # Finding Prices
    USD = soup.find('h3', {'class': "mb-2"}).text
    USD = USD.replace("Price: $", "").strip()

    # Finding the Product Category
    temp = soup.select('div[class="mt-2"]')[1].text
    temp = temp.replace("Category:", "")
    category = temp.strip()

    # Finding the Product Quantity Available
    # temp = soup.find('em', {'class': "icon ni ni-layers-fill"}).parent.parent.parent
    # left = temp.text
    # left = left.replace("Supply:", "")
    # left = left.strip()
    temp = soup.findAll('span', {'class': "badge bg-success"})
    temp = temp[1].text.split("/")
    left = temp[1].strip()

    # Finding Number Sold
    sold = temp[0].strip()

    # Finding Shipment Information (Origin)
    temp = soup.find('div', {'class': "alert alert-info"}).text
    temp = temp.split("to")
    shipFrom = temp[0].replace("Shipping from ", "").strip()

    # Finding Shipment Information (Destination)
    shipTo = temp[1].split("for")
    shipTo = shipTo[0].strip()

    # Finding the Product description
    describe = soup.find('p', {'class': "card-text"}).text
    describe = describe.replace("\n", " ")
    describe = describe.strip()

    '''# Finding the Number of Product Reviews
    tag = soup.findAll(text=re.compile('Reviews'))
    for index in tag:
        reviews = index
        par = reviews.find('(')
        if par >= 0:
            reviews = reviews.replace("Reviews (", "")
            reviews = reviews.replace(")", "")
            reviews = reviews.split(",")
            review = str(abs(int(reviews[0])) + abs(int(reviews[1])))
        else:
            review = "-1"'''

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
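
# Illustrative usage sketch (not part of the crawler pipeline): the soup object is normally
# built from a saved description page before being handed to the parser above. The path
# 'description_page.html' is a hypothetical placeholder used only for this example.
#
#   with open('description_page.html', 'r', encoding='utf-8') as f:
#       soup = BeautifulSoup(f.read(), 'html.parser')
#   row = metaversemarket_description_parser(soup)
#   # row[3] is the product name, row[13] the USD price (see the field list above)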


# parses listing pages, so takes html pages of listing pages using soup object, and parses it for info it needs
# stores info it needs in different lists, these lists are returned after being organized
# @param: soup object looking at html page of listing page
# return: 'row' that contains a variety of lists that each hold info on the listing page
def metaversemarket_listing_parser(soup):
    # Fields to be parsed
    nm = 0                          # *Total_Products (Should be Integer)
    mktName = "MetaVerseMarket"     # 0 *Marketplace_Name
    vendor = []                     # 1 *Vendor y
    rating_vendor = []              # 2 Vendor_Rating
    success = []                    # 3 Vendor_Successful_Transactions
    name = []                       # 4 *Product_Name y
    CVE = []                        # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []                         # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []                   # 7 Product_Category y
    describe = []                   # 8 Product_Description
    views = []                      # 9 Product_Number_Of_Views
    reviews = []                    # 10 Product_Number_Of_Reviews
    rating_item = []                # 11 Product_Rating
    addDate = []                    # 12 Product_AddDate
    BTC = []                        # 13 Product_BTC_SellingPrice
    USD = []                        # 14 Product_USD_SellingPrice y
    EURO = []                       # 15 Product_EURO_SellingPrice
    sold = []                       # 16 Product_QuantitySold
    qLeft = []                      # 17 Product_QuantityLeft
    shipFrom = []                   # 18 Product_ShippedFrom
    shipTo = []                     # 19 Product_ShippedTo
    image = []                      # 20 Product_Image
    image_vendor = []               # 21 Vendor_Image
    href = []                       # 22 Product_Links

    listing = soup.findAll('div', {"class": "col-12 col-sm-4 col-xl-3 product_item_col p-1"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product
        product = bae[1].find('span', {"class": "text-primary"}).text
        name.append(cleanString(product.strip()))

        # Finding Prices
        price = a.find('strong').text
        price = price.replace("Buy for $", "")
        price = price.strip()
        USD.append(price)

        # Finding the Vendor
        temp = a.find('div', {'class': "mt-1 fs-12px"})
        temp = temp.findAll('span')
        vendor_name = temp[1].find('b').text
        vendor.append(cleanString(vendor_name.strip()))

        # Finding the Category
        cat = a.select_one('div[class="fs-12px"]')
        cat = cat.findAll('span')[1].text
        cat = cat.strip()
        category.append(cat)

        badge = a.find('span', {'class': "badge bg-success"})
        # Finding Number Sold and Quantity Left
        temp = badge.text
        temp = temp.split("/")
        num = temp[0]
        num = num.strip()
        sold.append(num)

        quant = temp[1]
        quant = quant.strip()
        qLeft.append(quant)

        # Finding Successful Transactions
        # NA

        # Finding Product review
        review = a.find('span', {'class': "badge bg-success fs-12px"}).text
        review = review.replace("+ ", "")
        reviews.append(review)

        # Finding Description
        description = a.find('p', {'class': "alert alert-light text-ssbold p-1"}).text
        description = description.replace("\n", " ")
        description = description.strip()
        describe.append(cleanString(description))

        # Finding Number of Views
        view = a.find('span', {'class': "badge bg-primary"}).text.strip()
        views.append(view)

        # Find where it ships from
        ships = a.find('div', {'class': "alert alert-info item_alert fs-12px p-1"})
        ships = ships.findAll('b')
        sFrom = ships[0].text.strip()
        shipFrom.append(sFrom)

        # Find where it ships to
        sTo = ships[1].text.strip()
        shipTo.append(sTo)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)


# called by the crawler to get description links on a listing page
# @param: beautifulsoup object that is using the correct html page (listing page)
# return: list of description links from a listing page
def metaversemarket_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('div', {"class": "col-12 col-sm-4 col-xl-3 product_item_col p-1"})

    for a in listing:
        bae = a.find('a', href=True)
        link = bae['href']
        href.append(link)

    return href
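

# A minimal sketch of how the listing-side parsers could be exercised offline, assuming a
# listing page has already been saved to disk. The path 'listing_page.html' and the
# html.parser backend are assumptions for this example; in the project the crawler supplies
# the pages and the organized rows are passed on to the persistence code.
if __name__ == '__main__':
    with open('listing_page.html', 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')

    # Description links the crawler would visit next, and the organized listing rows
    print(metaversemarket_links_parser(listing_soup))
    print(metaversemarket_listing_parser(listing_soup))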