This is based on the calsyslab project.

__author__ = 'DarkWeb'

import re  # used below to match CVE and MS identifiers

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
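
# Note: cleanLink and organizeProducts (used below) are assumed to be provided
# by the wildcard import from MarketPlaces.Utilities.utilities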


# This is the method to parse the Description Pages (one page for each Product in the Listing Pages)
def ares_description_parser(soup):
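    """
    Parse an Ares marketplace description page into a single product record.

    :param soup: BeautifulSoup object containing the whole HTML of the description page
    :return: tuple of scraped fields, ordered as in the comments below
    """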

    # Fields to be parsed
    vendor = "-1"            # 0 *Vendor_Name
    success = "-1"           # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"     # 2 Vendor_Rating
    name = "-1"              # 3 *Product_Name
    describe = "-1"          # 4 Product_Description
    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"          # 7 Product_Category
    views = "-1"             # 8 Product_Number_Of_Views
    reviews = "-1"           # 9 Product_Number_Of_Reviews
    rating_item = "-1"       # 10 Product_Rating
    addDate = "-1"           # 11 Product_AddedDate
    BTC = "-1"               # 12 Product_BTC_SellingPrice
    USD = "-1"               # 13 Product_USD_SellingPrice
    EURO = "-1"              # 14 Product_EURO_SellingPrice
    sold = "-1"              # 15 Product_QuantitySold
    left = "-1"              # 16 Product_QuantityLeft
    shipFrom = "-1"          # 17 Product_ShippedFrom
    shipTo = "-1"            # 18 Product_ShippedTo
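    # Any field that cannot be located on the page keeps the "-1" placeholder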

    # Finding Product Name
    name = soup.find('div', {'class': "col-md-12 my-2"}).text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    bae = soup.find('div', {'class': "col-md-7"}).find('span').find_all('span')

    # Finding Vendor
    vendor = bae[0].text
    vendor = vendor.replace(",", "")
    vendor = vendor.replace("...", "")
    vendor = vendor.strip()

    # Finding Vendor Rating (one point per full-star icon, plus 0.5 for a half-star icon)
    full_stars = bae[2].find_all('i', {'class': "fas fa-star"})
    half_star = bae[2].find('i', {'class': "fas fa-star-half-alt"})
    rating_vendor = len(full_stars) + (0.5 if half_star is not None else 0)

    # Finding Successful Transactions
    success = bae[4].text
    success = success.replace("Sales ", "")
    success = success.strip()

    bae = soup.find('span', {'class': "text-left"}).find_all('span')

    # Finding Prices
    USD = bae[0].text
    USD = USD.replace("\n$", "")
    USD = USD.strip()

    # Shipping origin/destination are only listed for physical (non-digital) products
    shipping_info = bae[4].text
    if "Digital" not in shipping_info:
        shipping_info = shipping_info.split(" ")

        # Finding Shipment Information (Origin)
        shipFrom = shipping_info[0].strip()

        # Finding Shipment Information (Destination)
        shipTo = shipping_info[1].strip()

    bae = soup.find_all('textarea')

    # Finding the Product description
    describe = bae[0].text
    describe = describe.replace("\n", " ")
    describe = describe.replace("\r", " ")
    describe = describe.strip()

    # Finding the Terms and Conditions
    terms = bae[1].text
    terms = terms.replace("\n", " ")
    terms = terms.strip()
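    # Note: 'terms' is collected here but is not part of the returned row below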

    '''
    # Finding the Number of Product Reviews
    tag = soup.findAll(text=re.compile('Reviews'))
    for index in tag:
        reviews = index
        par = reviews.find('(')
        if par >= 0:
            reviews = reviews.replace("Reviews (", "")
            reviews = reviews.replace(")", "")
            reviews = reviews.split(",")
            review = str(abs(int(reviews[0])) + abs(int(reviews[1])))
        else:
            review = "-1"
    '''

    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')

    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row


# This is the method to parse the Listing Pages
def ares_listing_parser(soup):
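    """
    Parse an Ares marketplace listing page into per-product lists of fields.

    :param soup: BeautifulSoup object containing the whole HTML of the listing page
    :return: the products packed into a record by the project's organizeProducts helper
    """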

    # Fields to be parsed
    nm = 0                   # *Total_Products (Should be Integer)
    mktName = "Ares"         # 0 *Marketplace_Name
    vendor = []              # 1 *Vendor
    rating_vendor = []       # 2 Vendor_Rating
    success = []             # 3 Vendor_Successful_Transactions
    name = []                # 4 *Product_Name
    CVE = []                 # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                  # 6 Product_MS_Classification (Microsoft Security)
    category = []            # 7 Product_Category
    describe = []            # 8 Product_Description
    views = []               # 9 Product_Number_Of_Views
    reviews = []             # 10 Product_Number_Of_Reviews
    rating_item = []         # 11 Product_Rating
    addDate = []             # 12 Product_AddDate
    BTC = []                 # 13 Product_BTC_SellingPrice
    USD = []                 # 14 Product_USD_SellingPrice
    EURO = []                # 15 Product_EURO_SellingPrice
    sold = []                # 16 Product_QuantitySold
    qLeft = []               # 17 Product_QuantityLeft
    shipFrom = []            # 18 Product_ShippedFrom
    shipTo = []              # 19 Product_ShippedTo
    href = []                # 20 Product_Links

    listing = soup.findAll('div', {"class": "col-md-4 my-md-0 my-2 col-12"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Vendor
        vendor_name = bae[1].text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the Product name (taken from the product image's alt text)
        product = bae[2].find('img').get('alt')
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.strip()
        name.append(product)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)


def ares_links_parser(soup):
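    """
    Collect the product links a crawler should visit from a listing page.

    :param soup: BeautifulSoup object containing the whole HTML of the listing page
    :return: list of href values for the product buttons on the page
    """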
    # Returning all links that should be visited by the Crawler
    href = []

    listing = soup.findAll('a', {"class": "btn btn-success w-100 my-1"})

    for a in listing:
        link = a['href']
        href.append(link)

    return href
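

# Minimal usage sketch: run the description parser on a locally saved page.
# The file name below is hypothetical; any HTML captured by the crawler works.
if __name__ == '__main__':
    with open('ares_description_page.html', 'r', encoding='utf-8') as f:
        page_soup = BeautifulSoup(f.read(), 'html.parser')
    print(ares_description_parser(page_soup))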