This parser is based on the calsyslab project.


__author__ = 'DarkWeb'

import re  # used for the CVE/MS regex searches below
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# Parses description pages: takes the HTML of a description page as a soup
# object and extracts the information it needs.
# The extracted fields are collected and returned together once organized.
# @param: soup object of a description page's HTML
# @return: 'row', a tuple of fields holding the description-page information
def darkbazar_description_parser(soup):
    # Fields to be parsed
    vendor = "-1"  # 0 *Vendor_Name
    success = "-1"  # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"  # 2 Vendor_Rating
    name = "-1"  # 3 *Product_Name
    describe = "-1"  # 4 Product_Description
    CVE = "-1"  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"  # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"  # 7 Product_Category
    views = "-1"  # 8 Product_Number_Of_Views
    reviews = "-1"  # 9 Product_Number_Of_Reviews
    rating_item = "-1"  # 10 Product_Rating
    addDate = "-1"  # 11 Product_AddedDate
    BTC = "-1"  # 12 Product_BTC_SellingPrice
    USD = "-1"  # 13 Product_USD_SellingPrice
    EURO = "-1"  # 14 Product_EURO_SellingPrice
    sold = "-1"  # 15 Product_QuantitySold
    left = "-1"  # 16 Product_QuantityLeft
    shipFrom = "-1"  # 17 Product_ShippedFrom
    shipTo = "-1"  # 18 Product_ShippedTo
    image = "-1"  # 19 Product_Image
    vendor_image = "-1"  # 20 Vendor_Image
    # Finding Product Name
    divmb = soup.findAll('div', {'class': "mb-1"})
    name = divmb[0].text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    # Finding Vendor
    vendor = divmb[1].find('a').text.strip()
    # Finding Vendor Rating
    # The rating text appears to look like
    # "Vendor's Review : NN% (count) Product Review : NN% (N review)"
    # (inferred from the string manipulation below)
    temp = soup.find('div', {'class': ""}).text
    temp = temp.split('(')
    rating = temp[0].replace("Vendor's Review : ", "")
    rating = rating.replace("%", "")
    rating_vendor = rating.strip()

    # Finding the Product Rating and Number of Product Reviews
    reviews = temp[2].replace(" review)", "")
    reviews = reviews.strip()

    temp = temp[1].split(")")
    rating = temp[1].replace("Product Review : ", "")
    rating = rating.replace("%", "")
    rating_item = rating.strip()
    # Finding Prices
    USD = soup.find('div', {'class': "h3 text-primary"}).text.strip()

    # Finding the Product Category
    pmb = soup.findAll('p', {'class': "mb-1"})
    category = pmb[-1].text
    category = category.replace("Category: ", "").strip()

    # Finding the Product Quantity Available
    left = divmb[-1].text
    left = left.split(",", 1)[1]
    left = left.replace("in stock", "")
    left = left.strip()

    # Finding Number Sold
    sold = divmb[-1].text
    sold = sold.split(",", 1)[0]
    sold = sold.replace("sold", "")
    sold = sold.strip()
    # Finding Shipment Information (Origin)
    shipFrom = pmb[0].text
    shipFrom = shipFrom.replace("Ships from: ", "").strip()

    # Finding Shipment Information (Destination)
    shipTo = pmb[1].text
    shipTo = shipTo.replace("Ships to: ", "").strip()
    # Finding the Product description
    cardbody = soup.findAll('div', {'class': "card-body"})
    describe = cardbody[1].text.strip()

    # Finding Product Image
    image = soup.find('div', {'class': 'product-primary'}).find('img')
    image = image.get('src')
    image = image.split('base64,')[-1]
    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')
    # Populating the final variable (a tuple with all scraped fields)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
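
# Example usage (a sketch added for illustration, not part of the original
# module). It assumes a DarkBazar description page has already been fetched
# and saved to disk; the file name and the 'html.parser' backend are
# hypothetical placeholders:
#
#   with open('darkbazar_description.html', 'r', encoding='utf-8') as f:
#       soup = BeautifulSoup(f.read(), 'html.parser')
#   row = darkbazar_description_parser(soup)
#   print(row[0], row[13])  # vendor name and USD price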
# Parses listing pages: takes the HTML of a listing page as a soup object
# and extracts the information it needs.
# The extracted values are stored in per-field lists, which are organized and returned.
# @param: soup object of a listing page's HTML
# @return: 'row' containing the per-field lists of listing-page information
def darkbazar_listing_parser(soup):
    # Fields to be parsed
    nm = 0  # *Total_Products (should be an integer)
    mktName = "DarkBazar"  # 0 *Marketplace_Name
    vendor = []  # 1 *Vendor y
    rating_vendor = []  # 2 Vendor_Rating
    success = []  # 3 Vendor_Successful_Transactions
    name = []  # 4 *Product_Name y
    CVE = []  # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []  # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []  # 7 Product_Category y
    describe = []  # 8 Product_Description
    views = []  # 9 Product_Number_Of_Views
    reviews = []  # 10 Product_Number_Of_Reviews
    rating_item = []  # 11 Product_Rating
    addDate = []  # 12 Product_AddDate
    BTC = []  # 13 Product_BTC_SellingPrice
    USD = []  # 14 Product_USD_SellingPrice y
    EURO = []  # 15 Product_EURO_SellingPrice
    sold = []  # 16 Product_QuantitySold
    qLeft = []  # 17 Product_QuantityLeft
    shipFrom = []  # 18 Product_ShippedFrom
    shipTo = []  # 19 Product_ShippedTo
    image = []  # 20 Product_Image
    image_vendor = []  # 21 Vendor_Image
    href = []  # 22 Product_Links
    listing = soup.findAll('div', {"id": "itembox"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)
        lb = a.findAll('div', {"id": "littlebox"})

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

        # Finding the Product
        product = lb[1].find('a').text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        # Finding Product Image
        product_image = a.find('img')
        product_image = product_image.get('src')
        product_image = product_image.split('base64,')[-1]
        image.append(product_image)
        # Finding Prices
        price = lb[-1].find('div', {"class": "mb-1"}).text
        price = price.replace("$", "")
        price = price.strip()
        USD.append(price)

        # Finding the Vendor
        vendor_name = lb[-1].find("a").text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        image_vendor.append("-1")

        # Finding the Category
        cat = lb[-1].find("span").text
        cat = cat.replace("class:", "")
        cat = cat.strip()
        category.append(cat)
        span = lb[0].findAll("span")

        # Finding Number of Views
        num = span[0].text
        num = num.replace("views:", "")
        num = num.strip()
        views.append(num)

        # Finding Number Sold
        num = span[2].text
        num = num.replace("Sold:", "")
        num = num.strip()
        sold.append(num)

        # Finding Quantity Left
        quant = span[1].text
        quant = quant.replace("stock:", "")
        quant = quant.strip()
        qLeft.append(quant)
        # Adding shipping information
        ship = lb[2].findAll('small')[1].findAll('span')[1].text.split("->")
        shipFrom.append(ship[0].replace("Ship from ", "").strip())
        shipTo.append(ship[1].replace("to ", "").strip())
        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)
    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
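
# Example usage (a sketch added for illustration, not part of the original
# module). organizeProducts comes from MarketPlaces.Utilities.utilities and
# presumably merges the per-field lists above into one record per product;
# assuming a saved listing page (the file name is a hypothetical placeholder):
#
#   with open('darkbazar_listing.html', 'r', encoding='utf-8') as f:
#       soup = BeautifulSoup(f.read(), 'html.parser')
#   rows = darkbazar_listing_parser(soup)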
# Called by the crawler to get description links on a listing page
# @param: soup object of a listing page's HTML
# @return: list of description links found on the listing page
def darkbazar_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []
    listing = soup.findAll('div', {"id": "itembox"})

    # for a in listing:
    #     bae = a.find('a', {"class": "text-info"}, href=True)
    #     link = bae['href']
    #     href.append(link)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

    return href
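
# A minimal smoke test added for illustration (not part of the original
# module): it runs the links and listing parsers against a locally saved
# listing page. The file name is a hypothetical placeholder; in the real
# pipeline these pages would come from the marketplace crawler.
if __name__ == '__main__':
    with open('DarkBazar_listing.html', 'r', encoding='utf-8') as f:
        listing_soup = BeautifulSoup(f.read(), 'html.parser')

    # Description-page links the crawler would visit next
    print(darkbazar_links_parser(listing_soup))

    # Aggregated listing-page fields
    print(darkbazar_listing_parser(listing_soup))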