# Based on the calsyslab project.
__author__ = 'chris'

import re
import traceback
# Here, we are importing the auxiliary functions to clean or convert data
from MarketPlaces.Utilities.utilities import *
# Here, we are importing BeautifulSoup to search through the HTML tree
from bs4 import BeautifulSoup
# Imports for the test run
import glob
import os
import codecs
import shutil


# This is the method to parse the Description Pages (one page for each Product in the Listing Pages)
def Robinhood_description_parser(soup):
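    """Parse a single product Description Page of the Robinhood Market.

    :param soup: BeautifulSoup object of the description page HTML
    :return: tuple of the nineteen scraped fields, in the order expected by the pipeline
    """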
    # Fields to be parsed
    vendor = "-1"                       # 0 *Vendor_Name
    success = "-1"                      # 1 Vendor_Successful_Transactions
    rating_vendor = "-1"                # 2 Vendor_Rating
    name = "-1"                         # 3 *Product_Name
    describe = "-1"                     # 4 Product_Description
    CVE = "-1"                          # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = "-1"                           # 6 Product_MS_Classification (Microsoft Security)
    category = "-1"                     # 7 Product_Category
    views = "-1"                        # 8 Product_Number_Of_Views
    reviews = "-1"                      # 9 Product_Number_Of_Reviews
    rating_item = "-1"                  # 10 Product_Rating
    addDate = "-1"                      # 11 Product_AddedDate
    BTC = "-1"                          # 12 Product_BTC_SellingPrice
    USD = "-1"                          # 13 Product_USD_SellingPrice
    EURO = "-1"                         # 14 Product_EURO_SellingPrice
    sold = "-1"                         # 15 Product_QuantitySold
    left = "-1"                         # 16 Product_QuantityLeft
    shipFrom = "-1"                     # 17 Product_ShippedFrom
    shipTo = "-1"                       # 18 Product_ShippedTo

    # Finding Product Name
    name = soup.find('h1').text
    name = name.replace('\n', ' ')
    name = name.replace(",", "")
    name = name.strip()
    # Finding description
    desc = ''
    primary = soup.find('div', {'id': 'primary'})
    product = primary.findAll('div')[1]
    commerce = product.findAll('div', recursive=False)[2]
    descDiv = commerce.findAll('div')[0]
    # descDiv = soup.find('div', {'class': 'woocommerce-Tabs-panel woocommerce-Tabs-panel--description panel entry-content wc-tab'})

    descText = descDiv.findAll('p')
    for para in descText:
        desc = desc + para.text
    describe = desc
    # Finding Vendor
    vendor = soup.find('a', {'class': 'wcfm_dashboard_item_title'}).text
    vendor = vendor.replace(",", "")
    vendor = vendor.replace("Sold by:", "")
    vendor = vendor.strip()

    # Finding Category
    catSpan = soup.find('span', {'class': 'posted_in'})
    category = catSpan.find('a').text

    # Finding USD
    priceText = soup.find('p', {'class': 'price'}).text
    USD = str(priceText).strip()

    # Searching for CVE and MS categories
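    # Any text node matching an identifier such as CVE-2021-44228 or MS17-010 is collected;
    # the matches are concatenated into one space-separated string below.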
    cve = soup.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
    if cve:
        CVE = " "
        for idx in cve:
            CVE += (idx)
            CVE += " "
            CVE = CVE.replace(',', ' ')
            CVE = CVE.replace('\n', '')
    ms = soup.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
    if ms:
        MS = " "
        for im in ms:
            MS += (im)
            MS += " "
            MS = MS.replace(',', ' ')
            MS = MS.replace('\n', '')

    # Populating the final variable (this should be a list with all fields scraped)
    row = (vendor, rating_vendor, success, name, describe, CVE, MS, category, views, reviews, rating_item, addDate,
           BTC, USD, EURO, sold, left, shipFrom, shipTo)

    # Sending the results
    return row


# This is the method to parse the Listing Pages
def Robinhood_listing_parser(soup):
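    """Parse a Listing Page of the Robinhood Market.

    :param soup: BeautifulSoup object of the listing page HTML
    :return: the per-product field lists, combined by organizeProducts into one row per product
    """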
    # Fields to be parsed
    nm = 0                              # *Total_Products (Should be Integer)
    mktName = "Robinhood Market"        # 0 *Marketplace_Name
    vendor = []                         # 1 *Vendor y
    rating_vendor = []                  # 2 Vendor_Rating
    success = []                        # 3 Vendor_Successful_Transactions
    name = []                           # 4 *Product_Name y
    CVE = []                            # 5 Product_CVE_Classification (Common Vulnerabilities and Exposures)
    MS = []                             # 6 Product_MS_Classification (Microsoft Security)
    category = []                       # 7 Product_Category y
    describe = []                       # 8 Product_Description
    views = []                          # 9 Product_Number_Of_Views
    reviews = []                        # 10 Product_Number_Of_Reviews
    rating_item = []                    # 11 Product_Rating
    addDate = []                        # 12 Product_AddDate
    BTC = []                            # 13 Product_BTC_SellingPrice
    USD = []                            # 14 Product_USD_SellingPrice y
    EURO = []                           # 15 Product_EURO_SellingPrice
    sold = []                           # 16 Product_QuantitySold
    qLeft = []                          # 17 Product_QuantityLeft
    shipFrom = []                       # 18 Product_ShippedFrom
    shipTo = []                         # 19 Product_ShippedTo
    href = []                           # 20 Product_Links

    listing = soup.find('ul', {"class": "products columns-4"})
    items = listing.findAll('li')

    # Populating the Number of Products
    nm = len(items)
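    # Each <li> in the WooCommerce product grid is one product card; every field list
    # below receives exactly one entry per card so the columns stay aligned.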
    for card in items:

        # Finding Category (the listing page's h1 is the category, shared by every card)
        cat = soup.find("h1").text
        cat = cat.replace('\n', ' ')
        cat = cat.replace(",", "")
        cat = cat.strip()
        category.append(cat)

        # Adding the url to the list of urls
        link = card.find('a').get('href')
        href.append(link)

        # Finding Product Name
        product = card.find("h2").text
        product = product.replace('\n', ' ')
        product = product.replace(",", "")
        product = product.strip()
        name.append(product)
        # Finding Vendor
        info = card.find('div', {'class': 'wcfmmp_sold_by_container'})
        vendor_name = info.find('a', {'class': 'wcfm_dashboard_item_title'}).text
        vendor_name = vendor_name.replace(",", "")
        vendor_name = vendor_name.strip()
        vendor.append(vendor_name)

        # Finding USD
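        # WooCommerce typically renders prices as <span class="price"><bdi><span>$</span>123.45</bdi></span>,
        # so the numeric amount is assumed to be the text node following the currency-symbol span.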
        span = card.find('span', {'class': 'price'})
        if span is not None:
            bdi = span.find('bdi')
            usdText = bdi.find('span').next_sibling
            usdVal = usdText.text
        else:
            usdVal = "0"
        USD.append(usdVal)
        # Searching for CVE and MS categories
        cve = card.findAll(text=re.compile(r'CVE-\d{4}-\d{4}'))
        if not cve:
            cveValue = "-1"
        else:
            cee = " "
            for idx in cve:
                cee += (idx)
                cee += " "
                cee = cee.replace(',', ' ')
                cee = cee.replace('\n', '')
            cveValue = cee
        CVE.append(cveValue)

        ms = card.findAll(text=re.compile(r'MS\d{2}-\d{3}'))
        if not ms:
            MSValue = "-1"
        else:
            me = " "
            for im in ms:
                me += (im)
                me += " "
                me = me.replace(',', ' ')
                me = me.replace('\n', '')
            MSValue = me
        MS.append(MSValue)

    # print(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
    #       reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)

    # Populate the final variable (this should be a list with all fields scraped)
    return organizeProducts(mktName, nm, vendor, rating_vendor, success, name, CVE, MS, category, describe, views,
                            reviews, rating_item, addDate, BTC, USD, EURO, sold, qLeft, shipFrom, shipTo, href)


def Robinhood_links_parser(soup):
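    """Collect all product links on a listing page.

    :param soup: BeautifulSoup object of the listing page HTML
    :return: list of product page URLs
    """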
    # Returning all links that should be visited by the Crawler
    href = []
    # listing = soup.findAll('div', {"class": "woocommerce columns-4"})
    listing = soup.find('ul', {"class": "products columns-4"}).findAll('li')

    for item in listing:
        link = item.find('a')['href']
        href.append(link)

    return href


if __name__ == '__main__':

    nError = 0
    marketPlace = 'RobinhoodMarket'

    lines = []  # listing pages
    lns = []    # description pages
    detPage = {}

    '''
    # reading description pages
    count = 0
    for fileDescription in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\08082023\\Description", '*.html')):
        count += 1
        lns.append(fileDescription)
        # if count > 5:
        #     break

    for index, line2 in enumerate(lns):

        print("Reading description folder of '" + marketPlace + "', file '" + os.path.basename(line2) + "', index= " + str(index + 1) + " ... " + str(len(lns)))

        try:
            html = codecs.open(line2.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            try:
                html = open(line2.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                nError += 1
                print("There was a problem reading the file " + line2 + " in the Description section!")
                # if createLog:
                #     logFile.write(str(nError) + ". There was a problem reading the file " + line2 + " in the Description section.\n")
                continue

        try:
            print(Robinhood_description_parser(soup))
        except:
            traceback.print_exc()
            print("There was a problem parsing the file " + line2 + " in the Description section!")
    '''
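    # The triple-quoted block above is a disabled test harness for description pages;
    # remove the quotes to run it against saved HTML files.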
    # reading listing pages
    count = 0
    for fileListing in glob.glob(os.path.join("..\\" + marketPlace + "\\HTML_Pages\\08082023\\Listing", '*.html')):
        count += 1
        lines.append(fileListing)
        # if count > 1:
        #     break

    for index, line1 in enumerate(lines):

        print("Reading listing folder of '" + marketPlace + "', file '" + os.path.basename(line1) + "', index= " + str(index + 1) + " ... " + str(len(lines)))

        readError = False
        try:
            html = codecs.open(line1.strip('\n'), encoding='utf8')
            soup = BeautifulSoup(html, "html.parser")
            html.close()
        except:
            try:
                html = open(line1.strip('\n'))
                soup = BeautifulSoup(html, "html.parser")
                html.close()
            except:
                print("There was a problem reading the file " + line1 + " in the Listing section!")
                readError = True

        if not readError:
            parseError = False
            try:
                test = Robinhood_listing_parser(soup)
                print(test)
            except:
                traceback.print_exc()
                print("There was a problem parsing the file " + line1 + " in the Listing section!")
                parseError = True

    print("DONE")