# This is based on the calsyslab project.

__author__ = 'DarkWeb'

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
import re


# Parses description pages: takes the HTML of a description page as a soup object,
# extracts the info it needs into different fields, and returns them organized in a row.
# @param: soup object of a description page's HTML
# return: 'row' that contains the fields holding info from the description page
def zeroday_description_parser(soup):
    # Fields to be parsed
    vendor = "-1"        # 0 *Vendor_Name y
    success = "-1"       # 1 Vendor_Successful_Transactions n
    rating_vendor = "-1" # 2 Vendor_Rating y
    name = "-1"          # 3 *Product_Name y
    describe = "-1"      # 4 Product_Description y
    CVE = "-1"           # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"            # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"      # 7 Product_Category y
    views = "-1"         # 8 Product_Number_Of_Views y
    reviews = "-1"       # 9 Product_Number_Of_Reviews n
    rating_item = "-1"   # 10 Product_Rating n
    addDate = "-1"       # 11 Product_AddedDate y
    BTC = "-1"           # 12 Product_BTC_SellingPrice y
    USD = "-1"           # 13 Product_USD_SellingPrice y
    EURO = "-1"          # 14 Product_EURO_SellingPrice n
    sold = "-1"          # 15 Product_QuantitySold n
    left = "-1"          # 16 Product_QuantityLeft n
    shipFrom = "-1"      # 17 Product_ShippedFrom n
    shipTo = "-1"        # 18 Product_ShippedTo n
    image = "-1"         # 19 Product_Image n
    vendor_image = "-1"  # 20 Vendor_Image n
    # Finding Vendor
    div_vendor = soup.find('div', {'class': "exploit_view_table_user_content"})
    vendor = div_vendor.find('a').text
    vendor = cleanString(vendor).strip()

    # Finding Vendor Rating (bug in their system shows standard rating)
    div_rating = div_vendor.find_next_sibling('div')
    rating_vendor = div_rating.find_all('div', {'class': "td"})[1].text
    rating_vendor = cleanNumbers(rating_vendor).strip()

    # Finding Product Name
    div_name = soup.find('div', {'class': "exploit_title"})
    name = div_name.find('h1', {'class': "YellowText"}).text
    name = cleanString(name).strip()

    # Finding Product description
    describe = soup.find('meta', attrs={'name': "description"}).get("content")
    describe = cleanString(describe).strip()
    # Searching for CVE and MS categories
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += idx
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
            CVE = CVE.replace('\t', '')

    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += im
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')
    # Finding category
    div_category = soup.find('div', {'class': "td"}, text="Category").find_next_sibling('div', {'class': "td"})
    category = div_category.text
    category = cleanString(category).strip()

    # Finding views
    div_views = soup.find('div', {'class': "td"}, text="Views").find_next_sibling('div', {'class': "td"})
    views = div_views.text.replace(' ', '').strip()

    # Finding added date
    div_date = soup.find('div', {'class': 'td'}, text='Date add').find_next_sibling('div', {'class': "td"})
    addDate = div_date.text.strip()
    # addDate = datetime.strptime(addDate, "%d-%m-%Y")

    # Finding BTC and USD/GOLD
    div_price = soup.find('div', {'class': "td"}, text="Price")
    price = div_price.find_next_sibling('div', {'class': "td"}).text.strip()

    if "free" in price.lower():
        BTC = 0
        USD = 0
    else:
        price = ''.join(price.split())
        index = price.index('BTC')
        BTC = price[:index]
        USD = price[index + 3:].replace('USD', '')

    # Populating the final variable (this should be a tuple with all the fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
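
# Minimal usage sketch, assuming a description page saved locally; the file name
# "description_page.html" is hypothetical, and cleanString/cleanNumbers are
# expected to come from MarketPlaces.Utilities.utilities:
#
#   with open("description_page.html", "r", encoding="utf-8") as f:
#       soup = BeautifulSoup(f.read(), "html.parser")
#   row = zeroday_description_parser(soup)
#   print(row[0], row[3])  # vendor name and product name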

# Parses listing pages: takes the HTML of a listing page as a soup object,
# extracts the info it needs into different lists, and returns those lists
# after they are organized.
# @param: soup object of a listing page's HTML
# return: 'row' that contains a variety of lists that each hold info on the listing page
def zeroday_listing_parser(soup):
    # Fields to be parsed
    nm = 0               # *Total_Products (Should be Integer)
    mktName = "ZeroDay"  # 0 *Marketplace_Name y
    vendor = []          # 1 *Vendor y
    rating_vendor = []   # 2 Vendor_Rating y
    success = []         # 3 Vendor_Successful_Transactions n
    name = []            # 4 *Product_Name y
    CVE = []             # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures) don't worry about this
    MS = []              # 6 Product_MS_Classification (Microsoft Security) don't worry about this
    category = []        # 7 Product_Category y
    describe = []        # 8 Product_Description n
    views = []           # 9 Product_Number_Of_Views y
    reviews = []         # 10 Product_Number_Of_Reviews n
    rating_item = []     # 11 Product_Rating n
    addDate = []         # 12 Product_AddDate y
    BTC = []             # 13 Product_BTC_SellingPrice y
    USD = []             # 14 Product_USD_SellingPrice y
    EURO = []            # 15 Product_EURO_SellingPrice n
    sold = []            # 16 Product_QuantitySold n
    qLeft = []           # 17 Product_QuantityLeft n
    shipFrom = []        # 18 Product_ShippedFrom n
    shipTo = []          # 19 Product_ShippedTo n
    image = []           # 20 Product_Image n
    image_vendor = []    # 21 Vendor_Image n
    href = []            # 22 Product_Links y

    listings = soup.findAll('div', {"class": "ExploitTableContent"})

    # Populating the Number of Products
    nm = len(listings)
    for listing in listings:
        # Finding the vendor name
        vendor_name = listing.find('a', href=lambda href: href and '/author/' in href).text
        vendor_name = cleanString(vendor_name).strip()
        vendor.append(vendor_name)

        # Finding the vendor rating
        vendor_level_div = listing.find('div', {'class': "tips_bl"})
        vendor_level = vendor_level_div.find('div', {'class': "tips_value_big"}).text
        vendor_level = cleanNumbers(vendor_level)
        rating_vendor.append(vendor_level)

        # Finding the product name
        product_name = listing.find('h3').text
        product_name = cleanString(product_name).strip()
        name.append(product_name)
        # Searching for CVE and MS categories
        cve = listing.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += idx
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
                cee = cee.replace('\t', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = listing.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += im
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)
        # Finding the category
        category_text = listing.find_all('div', {'class': "td"})[2].text
        category_text = cleanString(category_text).strip()
        category.append(category_text)

        # Finding the hrefs
        description_link = listing.find('h3').find('a')['href']
        href.append(description_link)

        # Finding the views
        views_text = listing.find_all('div', {'class': "td"})[3].text.replace(' ', '').strip()
        views.append(views_text)

        # Finding the date added
        date = listing.find('div', {'class': "td"}).find('a').text.strip()
        # date = datetime.strptime(date, "%d-%m-%Y")
        addDate.append(date)

        # Finding the BTC and USD/GOLD prices
        btc_listing = listing.find('div', {"class": 'tips_price_btc'})
        if btc_listing:
            btc_price = btc_listing.text.strip().replace('Open this exploit for ', '').replace(' BTC', '')
            # If the old price is crossed out (discounted item), take the current price next to it
            crossed = btc_listing.find('span', {'class': "crossed"})
            if crossed:
                btc_price = crossed.next_sibling.strip().replace(' BTC', '')
        else:
            btc_price = 0
        BTC.append(btc_price)

        usd_listing = listing.find('div', {"class": 'tips_price_1'})
        if usd_listing:
            usd_price = usd_listing.text.strip().replace('Open this exploit for ', '').replace(' GOLD', '')
            crossed = usd_listing.find('span', {'class': "crossed"})
            if crossed:
                usd_price = crossed.next_sibling.strip().replace(' GOLD', '')
            usd_price = usd_price.replace(' ', '')
        else:
            usd_price = 0
        USD.append(usd_price)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href, image,
                            image_vendor)
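
# Minimal usage sketch, assuming a listing page saved locally; the file name
# "listing_page.html" is hypothetical, and organizeProducts is expected to come
# from MarketPlaces.Utilities.utilities:
#
#   with open("listing_page.html", "r", encoding="utf-8") as f:
#       soup = BeautifulSoup(f.read(), "html.parser")
#   row = zeroday_listing_parser(soup)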

# Called by the crawler to get description links on a listing page
# @param: soup object of a listing page's HTML
# return: list of description links from a listing page
def zeroday_links_parser(soup):
    # Returning all links that should be visited by the Crawler
    href = []
    listings = soup.findAll('div', {"class": "ExploitTableContent"})

    for listing in listings:
        # Adding the url to the list of urls
        description_link = listing.find('h3').find('a')['href']
        href.append(description_link)

    return href
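
# A hedged smoke test sketch: it assumes a listing page has been saved locally
# as "zeroday_listing.html" (a hypothetical name) and prints the description
# links the parser extracts from it.
if __name__ == "__main__":
    with open("zeroday_listing.html", "r", encoding="utf-8") as f:
        test_soup = BeautifulSoup(f.read(), "html.parser")
    for link in zeroday_links_parser(test_soup):
        print(link)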