# This parser is based on the calsyslab project.
__author__ = 'DarkWeb'

import re

# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup


# This is the method to parse the Description Pages (one page for each Product in the Listing Pages)
def wethenorth_description_parser(soup):

    # Fields to be parsed
    vendor = "-1"          # 0 *Vendor_Name
    success = "-1"         # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"   # 2 Vendor_Rating
    name = "-1"            # 3 *Product_Name
    describe = "-1"        # 4 Product_Description
    CVE = "-1"             # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"              # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"        # 7 Product_Category
    views = "-1"           # 8 Product_Number_Of_Views
    reviews = "-1"         # 9 Product_Number_Of_Reviews
    rating_item = "-1"     # 10 Product_Rating
    addDate = "-1"         # 11 Product_AddedDate
    BTC = "-1"             # 12 Product_BTC_SellingPrice
    USD = "-1"             # 13 Product_USD_SellingPrice
    EURO = "-1"            # 14 Product_EURO_SellingPrice
    sold = "-1"            # 15 Product_QuantitySold
    left = "-1"            # 16 Product_QuantityLeft
    shipFrom = "-1"        # 17 Product_ShippedFrom
    shipTo = "-1"          # 18 Product_ShippedTo
    image = "-1"           # 19 Product_Image
    vendor_image = "-1"    # 20 Vendor_Image

    # Finding Product Name
    listDes = soup.find('div', {'class': "listDes"})
    name = listDes.find('h2').text
    name = cleanString(name)
    name = name.strip()

    # Finding Vendor
    vendor = listDes.find('b').text
    vendor = cleanString(vendor)
    vendor = vendor.strip()

    # Finding Vendor Rating
    rating = listDes.find('span', {'class': 'levelSet'})
    rating = rating.text
    rating = cleanNumbers(rating)
    rating_vendor = rating.strip()

    # Finding Prices - all prices on We The North are in CAD; the "CAD" label is kept in the
    # resulting string so that the currency is explicit
    padp = listDes.find('p', {'class': 'padp'})
    USD = padp.find('span').text
    USD = USD.strip()
    BTC = padp.find_next_sibling('p').text
    BTC = cleanNumbers(BTC)
    BTC = BTC.strip()

    # Finding Escrow - no escrow on the WTN market

    # Finding Shipment Information; listings marked "Digital" carry no origin/destination rows
    shipping_info = listDes.find('tbody')
    if "Digital" not in shipping_info.text:
        shipping_info = shipping_info.find_all('tr')

        row1 = shipping_info[0].find_all('td')
        # Finding Shipment Information (Origin)
        shipFrom = row1[-1].text
        shipFrom = cleanString(shipFrom)
        shipFrom = shipFrom.strip()
        if shipFrom == "":
            shipFrom = "-1"

        row2 = shipping_info[1].find_all('td')
        # Finding Shipment Information (Destination)
        shipTo = row2[-1].text
        shipTo = cleanString(shipTo)
        shipTo = shipTo.strip()
        if shipTo == "":
            shipTo = "-1"

    # Finding the Product description
    describe = soup.find("div", {'class': 'tabcontent'})
    describe = describe.find('p').text
    describe = cleanString(describe)
    describe = describe.strip()

    # Searching for CVE and MS categories
    # no CVE or MS for the WTN market

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo, image, vendor_image)

    # Sending the results
    return row
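
# A minimal usage sketch for the description parser, kept as a comment so the module
# stays import-safe. The file name is a hypothetical placeholder; in this project the
# soup normally comes from an HTML page the crawler has already saved.
#
#     with open('wtn_description_page.html', 'r', encoding='utf-8') as f:
#         soup = BeautifulSoup(f.read(), 'html.parser')
#     row = wethenorth_description_parser(soup)
#     vendor, rating_vendor = row[0], row[1]   # fields follow the indices listed above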


# This is the method to parse the Listing Pages
def wethenorth_listing_parser(soup):

    # Fields to be parsed
    nm = 0                   # *Total_Products (Should be Integer)
    mktName = "WeTheNorth"   # 0 *Marketplace_Name
    vendor = []              # 1 *Vendor y
    rating_vendor = []       # 2 Vendor_Rating
    success = []             # 3 Vendor_Successful_Transactions
    name = []                # 4 *Product_Name y
    CVE = []                 # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                  # 6 Product_MS_Classification (Microsoft Security)
    category = []            # 7 Product_Category y
    describe = []            # 8 Product_Description
    views = []               # 9 Product_Number_Of_Views
    reviews = []             # 10 Product_Number_Of_Reviews
    rating_item = []         # 11 Product_Rating
    addDate = []             # 12 Product_AddDate
    BTC = []                 # 13 Product_BTC_SellingPrice
    USD = []                 # 14 Product_USD_SellingPrice y
    EURO = []                # 15 Product_EURO_SellingPrice
    sold = []                # 16 Product_QuantitySold
    qLeft = []               # 17 Product_QuantityLeft
    shipFrom = []            # 18 Product_ShippedFrom
    shipTo = []              # 19 Product_ShippedTo
    image = []               # 20 Product_Image
    image_vendor = []        # 21 Vendor_Image
    href = []                # 22 Product_Links

    right_content = soup.find('div', {"class": "right-content"})
    listing = right_content.findAll('div', {"class": "col-1search"})
    # Cut out the irrelevant products shown in blue; the first three products on each page are usually unrelated
    listing = listing[3:]

    # Populating the Number of Products
    nm = len(listing)

    for a in listing:
        bae = a.findAll('a', href=True)

        # Adding the url to the list of urls
        link = bae[0].get('href')
        href.append(link)

        # Finding the Vendor
        vendor_name = a.find('p', {'class': 'padp'})
        vendor_name = vendor_name.find('a').text
        vendor_name = cleanString(vendor_name)
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding the Product
        product = a.find('div', {'class': 'col-1centre'})
        product = product.find('div', {'class': 'head'}).find('a').text
        product = cleanString(product)
        product = product.strip()
        name.append(product)

        # Finding the Category - it sits between the first and second dashes of the vendor line
        category_name = a.find('p', {'class': 'padp'}).text
        first_dash = category_name.find('-')
        second_dash = category_name[first_dash + 1:].find('-')
        # second_dash is relative to the substring, so the absolute end index is first_dash + 1 + second_dash
        category_name = category_name[first_dash + 1: first_dash + 1 + second_dash]
        category_name = cleanString(category_name)
        category_name = category_name.strip()
        category.append(category_name)

        # Finding Successful Transactions - the count appears in parentheses on the same line
        vendor_success = a.find('p', {'class': 'padp'}).text
        open_paren = vendor_success.find('(')
        vendor_success = vendor_success[open_paren + 1:]
        vendor_success = cleanNumbers(vendor_success)
        vendor_success = vendor_success.strip()
        success.append(vendor_success)

        # Finding Views
        view_count = a.text
        view_count = view_count[view_count.find('Views:'): view_count.find('Sales:')]
        view_count = view_count.replace('Views:', ' ')
        view_count = cleanNumbers(view_count)
        view_count = view_count.strip()
        views.append(view_count)

        # Finding Quantity Sold
        sold_count = a.text
        sold_count = sold_count[sold_count.find('Sales:'): sold_count.find('Short')]
        sold_count = sold_count.replace('Sales:', ' ')
        sold_count = cleanNumbers(sold_count)
        sold_count = sold_count.strip()
        sold.append(sold_count)

        right = a.find('div', {'class': 'col-1right'})

        # Finding USD (prices on WTN are in CAD, so the label is kept)
        usd = right.find('a').text
        usd = "CAD " + usd.strip()
        USD.append(usd)

        # Finding BTC - the BTC price is the parenthesized portion of the text
        btc = right.text
        open_paren = btc.find('(')
        close_paren = btc[open_paren + 1:].find(')')
        # close_paren is relative to the substring, so slice up to open_paren + 1 + close_paren
        btc = btc[open_paren + 1: open_paren + 1 + close_paren]
        btc = cleanNumbers(btc)
        btc = btc.strip()
        BTC.append(btc)

        # Finding Product Image (stored inline as a base64 data URI; keep only the payload)
        product_image = right.find('img')
        product_image = product_image.get('src')
        product_image = product_image.split('base64,')[-1]
        image.append(product_image)

        # Searching for CVE and MS categories
        # (no CVE or MS on the WTN market, so these normally stay "-1")
        cve = a.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = a.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href,
                            image, image_vendor)
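
# A similar sketch for the listing parser, under the same assumptions as above
# (the path to a saved listing page is hypothetical):
#
#     with open('wtn_listing_page.html', 'r', encoding='utf-8') as f:
#         soup = BeautifulSoup(f.read(), 'html.parser')
#     products = wethenorth_listing_parser(soup)   # rows assembled by organizeProducts()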


# Returning all links that should be visited by the Crawler
def wethenorth_links_parser(soup):

    href = []
    right_content = soup.find('div', {"class": "right-content"})
    listing = right_content.findAll('div', {"class": "col-1search"})
    # Cut out the irrelevant products shown in blue; the first three products on each page are usually unrelated
    listing = listing[3:]

    for a in listing:
        link = a.find('a')
        link = link['href']
        href.append(link)

    return href
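

# A small smoke test tying the parsers together. The file name below is an
# assumption for illustration only; in the real pipeline the crawler supplies
# the saved pages.
if __name__ == '__main__':
    with open('wtn_listing_page.html', 'r', encoding='utf-8') as f:  # hypothetical saved listing page
        listing_soup = BeautifulSoup(f.read(), 'html.parser')

    # Print the product links the crawler would visit next
    for product_url in wethenorth_links_parser(listing_soup):
        print(product_url)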