# This is based on the calsyslab project.
__author__ = 'DarkWeb'

import re

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *

# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# parses description pages: takes the HTML of a description page as a soup object and extracts the info it needs
# stores that info in separate fields, which are organized into a row and returned
# @param: soup object looking at the html page of a description page
# return: 'row', a tuple of fields that each hold one piece of info from the description page
def bohemia_description_parser(soup):

    # Fields to be parsed

    vendor = "-1"            # 0 *Vendor_Name
    success = "-1"           # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"     # 2 Vendor_Rating
    name = "-1"              # 3 *Product_Name
    describe = "-1"          # 4 Product_Description
    CVE = "-1"               # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"          # 7 Product_Category
    views = "-1"             # 8 Product_Number_Of_Views
    reviews = "-1"           # 9 Product_Number_Of_Reviews
    rating_item = "-1"       # 10 Product_Rating
    addDate = "-1"           # 11 Product_AddedDate
    BTC = "-1"               # 12 Product_BTC_SellingPrice
    USD = "-1"               # 13 Product_USD_SellingPrice
    EURO = "-1"              # 14 Product_EURO_SellingPrice
    sold = "-1"              # 15 Product_QuantitySold
    left = "-1"              # 16 Product_QuantityLeft
    shipFrom = "-1"          # 17 Product_ShippedFrom
    shipTo = "-1"            # 18 Product_ShippedTo

    # Finding Product Name
    name = soup.find('h1', {"style": "margin: 0; margin-bottom: 0.5em;"}).text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()

    # Finding Vendor
    vendor = soup.find('div', {"class": "user-photo"}).find_next_sibling('a').text
    vendor = vendor.strip()

    # Finding Vendor Rating
    rating_vendor = soup.find('span', {'class': "user-percent"}).text.strip()

    # Finding Users' Successful Transactions
    temp = ''
    success = soup.find('span', {'class': "smalltext shadow-text"}).text
    temp = success.split("|")
    success = str(temp[1])
    success = success.strip()

    # Finding Prices
    prices = soup.find('div', {'class': "col-md-3 sidebar-navigation user-details"}
                       ).find('div', {'class': "container text-left"})
    USD = prices.find('h1').text.strip()

    BTC = prices.find('h1').find_next_sibling('h3').text
    BTC = BTC.replace("BTC", "")
    BTC = BTC.strip()

    detail_row = soup.find('div', {'class': "detail-container text-left"}).find_all('strong')

    # Finding the Product Category (there isn't an element for it on the page)
    # category = li[1].find('span', {'class': "tag is-dark"}).text.strip()

    # Finding the Product Quantity Available
    left = soup.find('div', {'class': "container detail-container text-left"})
    left = left.find('div', {'class': "detail-row"}).text.replace('\n', '')
    left = left.split("Available Stock:")
    left = left[1].strip()

    # Finding Number Sold
    sold = detail_row[0].find_parent()
    sold = sold.text
    sold = sold.replace("Total Sold:", "")
    sold = sold.strip()

    # Finding Shipment Information (Origin) (there is no shipping information on the page)
    '''if "Ships from:" in li[-2].text:
        shipFrom = li[-2].text
        shipFrom = shipFrom.replace("Ships from: ", "")
        # shipFrom = shipFrom.replace(",", "")
        shipFrom = shipFrom.strip()'''

    # Finding Shipment Information (Destination) (no shipping info on the page)
    '''shipTo = li[-1].find('div', {'title': "List of countries is scrollable"}).text
    shipTo = shipTo.replace("Ships to: ", "")
    shipTo = shipTo.strip()
    if "certain countries" in shipTo:
        countries = ""
        tags = li[-1].find_all('span', {'class': "tag"})
        for tag in tags:
            country = tag.text.strip()
            countries += country + ", "
        shipTo = countries.strip(", ")'''

    # Finding the Product description
    describe = soup.find('div', {'class': "container feedback-container"})
    describe = describe.find_next_sibling('div', {'class': "container"}).find('p').text
    describe = describe.replace("\n", " ")
    describe = describe.strip()

    # Finding the Number of Product Reviews
    reviews = detail_row[2].find_parent().text
    reviews = reviews.split("Based on")
    reviews = reviews[1].replace("ratings)", "").strip()

    # Searching for CVE and MS categories (can't find them on the page)
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a tuple with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row
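

# A minimal usage sketch (added for illustration, not part of the original crawler flow). It assumes
# a locally saved copy of a Bohemia description page; the file path and helper name are hypothetical.
def example_parse_description_file(html_path="bohemia_description.html"):
    with open(html_path, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    row = bohemia_description_parser(soup)
    # 'row' follows the field order documented above, e.g. row[0] is the vendor,
    # row[3] the product name and row[13] the USD price
    return row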


# parses listing pages: takes the HTML of a listing page as a soup object and extracts the info it needs
# stores that info in different lists, which are returned after being organized
# @param: soup object looking at the html page of a listing page
# return: 'row' that contains a variety of lists that each hold info on the products of the listing page
def bohemia_listing_parser(soup):

    # Fields to be parsed
    nm = 0                      # *Total_Products (Should be Integer)
    mktName = "Bohemia"         # 0 *Marketplace_Name
    vendor = []                 # 1 *Vendor y
    rating_vendor = []          # 2 Vendor_Rating
    success = []                # 3 Vendor_Successful_Transactions
    name = []                   # 4 *Product_Name y
    CVE = []                    # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                     # 6 Product_MS_Classification (Microsoft Security)
    category = []               # 7 Product_Category y
    describe = []               # 8 Product_Description
    views = []                  # 9 Product_Number_Of_Views
    reviews = []                # 10 Product_Number_Of_Reviews
    rating_item = []            # 11 Product_Rating
    addDate = []                # 12 Product_AddDate
    BTC = []                    # 13 Product_BTC_SellingPrice
    USD = []                    # 14 Product_USD_SellingPrice y
    EURO = []                   # 15 Product_EURO_SellingPrice
    sold = []                   # 16 Product_QuantitySold
    qLeft = []                  # 17 Product_QuantityLeft
    shipFrom = []               # 18 Product_ShippedFrom
    shipTo = []                 # 19 Product_ShippedTo
    href = []                   # 20 Product_Links

    listing = soup.findAll('div', {"class": "product-link"})

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        link = cleanLink(link)
        href.append(link)

        # Finding the Product
        product = bae[0].text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.replace("...", "")
        product = product.strip()
        name.append(product)

        bae = a.find('div', {'class': "container"})

        # Finding Prices
        price = bae.find('div', {'class': "product-price"}).find('h2').text
        ud = price.replace("USD", " ")
        # u = ud.replace("$","")
        ud = ud.replace(",", "")
        ud = ud.strip()
        USD.append(ud)

        bc = bae.find('div', {'class': "product-price"}).find('span', {'class': "shadow-text smalltext boldtext"}).text
        bc = bc.replace("\n", "")
        bc = bc.split()
        bc = bc[0].replace("BTC", "").strip()
        BTC.append(bc)

        # Finding the Vendor
        vendor_name = bae.find('b').find('a').text
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the Category
        cat = bae.find('span', {'class': "shadow-text smalltext"}).find('strong').text
        cat = cat.strip()
        category.append(cat)

        # Finding Number Sold and Quantity Left
        num = bae.find('div', {'class': "product-details-bottom"}).find('span').text
        num = num.replace("Sold", "")
        num = num.replace("times in total", "")
        num = num.strip()
        sold.append(num)

        quant = bae.find('div', {'class': "product-price"}).text
        quant = quant.replace("\n", "")
        quant = quant.split("Available")
        quant = quant[0].replace("Autoship", "").strip()
        qLeft.append(quant)

        # Finding Successful Transactions
        freq = bae.find('div', {'title': "Total Sales"}).find_parent().text.replace("\n", "")
        freq = freq.strip().split()
        freq = freq[-1].strip()
        success.append(freq)

        # Finding the Vendor Rating
        rate = bae.find('b').find('strong').text.strip()
        rating_vendor.append(rate)

        # Searching for CVE and MS categories
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)
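

# A minimal usage sketch (added for illustration, not part of the original crawler flow). It assumes
# a locally saved copy of a Bohemia listing page; the file path and helper name are hypothetical.
# organizeProducts, used by the parser above, comes in through the wildcard utilities import at the top.
def example_parse_listing_file(html_path="bohemia_listing.html"):
    with open(html_path, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    # one organized row per product found on the listing page
    return bohemia_listing_parser(soup)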


# called by the crawler to get description links on a listing page
# @param: soup object looking at the html page of a listing page
# return: list of description links from a listing page
def bohemia_links_parser(soup):

    # Returning all links that should be visited by the Crawler
    href = []

    temp = soup.find('div', {"class": "col-md-9 sidebar-content-right listing-content"})
    temp = temp.find('div', {"class": "product-listing"})
    listing = temp.findAll('div', {"class": "product-heading"})

    for a in listing:
        bae = a.find('a', href=True)
        link = bae['href']
        href.append(link)

    return href
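

# A minimal end-to-end sketch (added for illustration): collect the description links from a saved
# listing page the way the crawler would. The file path is hypothetical.
if __name__ == '__main__':
    with open("bohemia_listing.html", "r", encoding="utf-8") as f:
        listing_soup = BeautifulSoup(f.read(), "html.parser")
    for product_link in bohemia_links_parser(listing_soup):
        # in the full project the crawler visits each of these links and hands the fetched
        # page to bohemia_description_parser
        print(product_link)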