Commit 4bd9199

corrected comments based on PR feedback
Signed-off-by: rherrell <russ.herrell@hpe.com>
1 parent d89cafd commit 4bd9199

File tree

2 files changed: +27 -52 lines changed


sunfish_plugins/events_handlers/redfish/redfish_event_handler.py

Lines changed: 8 additions & 28 deletions
@@ -46,8 +46,6 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event
         response = response.json()
 
         ### Save agent registration
-        # connection_method_name = connectionMethodId.split('/')[-1]
-        # connection_method_name = connectionMethodId[:-len(connection_method_name)]
         event_handler.core.storage_backend.write(response)
 
         aggregation_source_id = str(uuid.uuid4())
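For orientation, the flow this hunk keeps boils down to two steps: persist the agent's registration response, then mint a fresh UUID for the new AggregationSource. A minimal sketch under those assumptions; the helper name is hypothetical and not part of the commit:

import uuid

def register_aggregation_source(storage_backend, agent_response: dict) -> str:
    # persist the agent registration payload exactly as the agent returned it
    storage_backend.write(agent_response)
    # mint a unique id for the new AggregationSource resource
    return str(uuid.uuid4())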
@@ -85,8 +83,6 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont
         # sunfishAliasDB contains renaming data, the alias xref array, the boundaryLink
         # data, and assorted flags that are used during upload renaming and final merge of
         # boundary components based on boundary links.
-
-        #
         #
 
         logger.info("New resource created")
@@ -149,7 +145,6 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context
         #
         logger.info("TriggerEvent method called")
         file_to_send = event['MessageArgs'][0]  # relative Resource Path
-        #file_path = os.path.join(self.conf['redfish_root'], file_to_send)
         hostname = event['MessageArgs'][1]  # target address
         destination = hostname + "/EventListener"  # may match a Subscription object's 'Destination' property
         logger.debug(f"path of file_to_send is {file_to_send}")
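TriggerEvent pulls a resource path and a target address out of MessageArgs and composes the destination URL. A sketch of the delivery step that would follow, assuming the file holds a JSON event payload and using requests; both are assumptions, and the helper is not from the commit:

import json
import requests

def send_event_file(file_to_send: str, hostname: str) -> int:
    destination = hostname + "/EventListener"  # as composed in the hunk above
    with open(file_to_send) as fd:
        event_payload = json.load(fd)
    response = requests.post(destination, json=event_payload, timeout=10)
    return response.status_code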
@@ -299,7 +294,6 @@ def forward_event(self, list, payload):
         Returns:
             list: list of all the reachable subcribers for the event.
         """
-        # resp = 400
 
         for id in list:
             path = os.path.join(self.redfish_root, 'EventService', 'Subscriptions', id)
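The loop this hunk opens resolves each subscription id to its stored object before delivery. A self-contained sketch of the overall forwarding pattern, assuming a read() that returns a Subscription object carrying a 'Destination' property; names and signatures are assumptions, not the module's API:

import os
import requests

def forward_event(storage_backend, redfish_root: str, ids: list, payload: dict) -> list:
    reachable = []
    for sub_id in ids:
        path = os.path.join(redfish_root, 'EventService', 'Subscriptions', sub_id)
        subscription = storage_backend.read(path)
        try:
            resp = requests.post(subscription["Destination"], json=payload, timeout=5)
            if resp.ok:
                reachable.append(sub_id)
        except requests.RequestException:
            continue  # unreachable subscriber; leave it out of the result
    return reachable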
@@ -403,39 +397,24 @@ def handleNestedObject(self, obj):
         # this needs to be done on ALL agents, not just the one we just uploaded
         RedfishEventHandler.updateAllAgentsRedirectedLinks(self)
 
-        return visited #why not the 'fetched' list?
+        return visited
 
     def create_uploaded_object(self, path: str, payload: dict):
         # before to add the ID and to call the methods there should be the json validation
 
         # generate unique uuid if is not present
         if '@odata.id' not in payload and 'Id' not in payload:
             pass
-            #id = str(uuid.uuid4())
-            #to_add = {
-            #    'Id': id,
-            #    '@odata.id': os.path.join(path, id)
-            #}
-            #payload.update(to_add)
             raise exception(f"create_uploaded_object: no Redfish ID (@odata.id) found")
 
-        #object_type = self._get_type(payload)
         # we assume agents can upload collections, just not the root level collections
         # we will check for uploaded collections later
-        #if "Collection" in object_type:
-        #    raise CollectionNotSupported()
 
         payload_to_write = payload
 
         try:
-            # 1. check the path target of the operation exists
-            # self.storage_backend.read(path)
-            # 2. we don't check the manager; we assume uploading agent is the manager unless it says otherwise
-            #agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.CREATE, path, payload=payload)
-            #if agent_response:
-            #    payload_to_write = agent_response
-            # 3. should be no custom handler, this is not a POST, we upload the objects directly into the Redfish database
-            #self.objects_handler.dispatch(object_type, path, SunfishRequestType.CREATE, payload=payload)
+            # this would be another location to verify new object to be written
+            # meets Sunfish and Redfish requirements
             pass
         except ResourceNotFound:
             logger.error("The collection where the resource is to be created does not exist.")
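The new comment marks where an uploaded object could be verified against Sunfish and Redfish requirements. One possible shape for that check, with a deliberately minimal required-property list; this is an assumption, not the project's rule set:

def verify_uploaded_object(payload: dict) -> None:
    # reject objects missing the properties every Redfish resource must carry
    for required in ("@odata.id", "@odata.type"):
        if required not in payload:
            raise ValueError(f"create_uploaded_object: missing {required}")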
@@ -445,7 +424,7 @@ def create_uploaded_object(self, path: str, payload: dict):
             # The object does not have a handler.
             logger.debug(f"The object {object_type} does not have a custom handler")
             pass
-        # 4. persist change in Sunfish tree
+        # persist change in Sunfish tree
         return self.storage_backend.write(payload_to_write)
 
     def get_aggregation_source(self, aggregation_source):
@@ -500,7 +479,10 @@ def fetchResource(self, obj_id, aggregation_source):
         if response.status_code == 200:  # Agent must have returned this object
             redfish_obj = response.json()
             # however, it must be a minimally valid object
-            # the following test should really be more extensive, but for now:
+            # This would be a great spot to insert a call to a Redfish schema validation function
+            # that could return a grading of this new redfish_obj: [PASS, FAIL, CAUTIONS]
+            # However, we are debugging not just code, but also new Redfish schema,
+            # so for now we just test for two required Redfish Properties to help weed out obviously incorrect responses
             if '@odata.id' in redfish_obj and '@odata.type' in redfish_obj:
 
                 # now rename if necessary and copy object into Sunfish inventory
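The new comments sketch a validator that grades an object as PASS, FAIL, or CAUTIONS rather than doing full schema validation. A hedged sketch of that idea; the individual checks are placeholders, not real Redfish schema validation:

from enum import Enum

class Grade(Enum):
    PASS = "PASS"
    CAUTION = "CAUTION"
    FAIL = "FAIL"

def grade_redfish_object(redfish_obj: dict) -> Grade:
    # the hard requirement mirrored from the hunk above
    if '@odata.id' not in redfish_obj or '@odata.type' not in redfish_obj:
        return Grade.FAIL
    # softer checks would go here; flag anything suspicious
    if 'Id' not in redfish_obj:
        return Grade.CAUTION
    return Grade.PASS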
@@ -608,7 +590,6 @@ def createInspectedObject(self,redfish_obj, aggregation_source):
             if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort":
                 RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source)
         # is this new object a new fabric object with same fabric UUID as an existing fabric?
-        # RedfishEventHandler.checkForAliasedFabrics(self, redfish_obj, aggregation_source)
         RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj)
 
         return redfish_obj
@@ -863,7 +844,6 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB):
         # extract the Endpoint URI associated with this parent object
         host_obj = self.storage_backend.read(host_link)
         redirected_endpoint = host_obj["Links"]["Endpoints"][0]["@odata.id"]
-        #redirected_endpoint = "None" #for now, to test
 
         if "Links" not in agent_bp_obj:
             agent_bp_obj["Links"] = {}

sunfish_plugins/storage/file_system_backend/backend_FS.py

Lines changed: 19 additions & 24 deletions
@@ -68,25 +68,32 @@ def write(self, payload: dict):
         parent_is_collection = True  # default assumption
         last_parent_to_exist = ""
 
-        print(f"BackendFS.write called on {id}")
+        logging.info(f"BackendFS.write called on {id}")
         id = id.split('/')
         for index in range(2, len(id[1:])):
             to_check = os.path.join('/'.join(id[:index]), 'index.json')
             to_check = os.path.join(os.getcwd(), self.root, to_check)
-            print(f"BackendFS.write(): path to check: {to_check}")
+            logging.info(f"BackendFS.write(): path to check: {to_check}")
             if os.path.exists(to_check) is True:
                 # capture this parent path as existing
                 last_parent_to_exist = to_check
             if os.path.exists(to_check) is False:
-                print("path does not exist\n")
+                logging.info("path does not exist\n")
                 # nice to know, but NOT an error!
-                #raise ActionNotAllowed()
+                # Log the situation and continue
+
+
+
+                # This particular code block looks unfinished and its purpose/functionality is unknown.
+                # It looks as if part of this block was intended to fill in missing path elements and is redundant
+                # with code just below this block. This block also sets a flag that is never used. - more analysis required.
+                #
                 '''
                 with open(to_check, 'r') as data_json:
                     data = json.load(data_json)
                     data_json.close()
                     if 'Collection' in data["@odata.type"]:
-                        print("path is to a Collection\n")
+                        logging.info("path is to a Collection\n")
                         members = data["Members"]
                         for x in members:
                             if x["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])):
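The loop at the top of this hunk walks every prefix of the resource path, remembering the deepest one whose index.json exists. Extracted as a standalone sketch with assumed parameter names:

import os

def find_last_existing_parent(root: str, resource_id: str) -> str:
    segments = resource_id.split('/')
    last_parent_to_exist = ""
    for index in range(2, len(segments[1:])):
        to_check = os.path.join(root, '/'.join(segments[:index]), 'index.json')
        if os.path.exists(to_check):
            last_parent_to_exist = to_check
    return last_parent_to_exist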
@@ -101,7 +108,7 @@ def write(self, payload: dict):
                                 present = True
                         else:
                             el["@odata.id"] = os.path.join(self.redfish_root, '/'.join(id[:index + 1]))
-                            print(f"BackendFS.write of {el['@odata.id']}")
+                            logging.info(f"BackendFS.write of {el['@odata.id']}")
                             with open(to_check, 'w') as data_json:
                                 json.dump(data, data_json, indent=4, sort_keys=True)
                                 data_json.close()
@@ -117,7 +124,6 @@ def write(self, payload: dict):
         for i in range(0, last_element - 1):
             full_collection = full_collection + id[i] + '/'
 
-        #collection_type = os.path.join(full_collection, collection_type)
         full_collection = os.path.join(full_collection, collection_type)
 
         collection_path = os.path.join(os.getcwd(), self.root,
@@ -128,23 +134,13 @@ def write(self, payload: dict):
         # check if the directory of the Collection already exists
         if not os.path.exists(collection_path):
             # if parent directory doesn't exist, we assume it is a collection and create the collection
-            print(f"backendFS.write: making collection path directory")
+            logging.info(f"backendFS.write: making collection path directory")
             os.makedirs(collection_path)
 
             # the following line assumes the path element name dictates the collection type
             # it is more proper to examine the @odata.type property of the object being created!
             config = utils.generate_collection(collection_type)
 
-            # if the item to be written is managed by an agent, we want the collection containing it to also be marked
-            # accordingly. We do this only for collections to be created because we assume that if the collection is
-            # there already:
-            # a. The collection is a first level one that is managed by Sunfish
-            # b. The collection was previously created during an agent discovery process and therefore already marked
-            # if "Oem" in payload and "Sunfish_RM" in payload["Oem"] and len(id) > 2 :
-            #     if "Oem" not in config:
-            #         config["Oem"] = {}
-            #     config["Oem"]["Sunfish_RM"] = payload["Oem"]["Sunfish_RM"]
-
             ## write file Resources/[folder]/index.json
             with open(os.path.join(collection_path, "index.json"), "w") as fd:
                 fd.write(json.dumps(config, indent=4, sort_keys=True))
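The surviving comment notes that deriving the collection type from the path element name is a shortcut, and that the member's @odata.type property is the better source. A sketch of that "more proper" derivation; the function name is assumed:

def collection_type_from_member(payload: dict) -> str:
    # "#ComputerSystem.v1_20_0.ComputerSystem" -> "ComputerSystemCollection"
    resource_type = payload["@odata.type"].lstrip('#').split('.')[0]
    return resource_type + "Collection"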
@@ -164,13 +160,13 @@ def write(self, payload: dict):
             parent_data = json.load(data_json)
             data_json.close()
             if 'Collection' in parent_data["@odata.type"]:
-                print("parent path is to a Collection\n")
+                logging.info("parent path is to a Collection\n")
                 if utils.check_unique_id(index_path, payload['@odata.id']) is False:
                     raise AlreadyExists(payload['@odata.id'])
                 pass
             else:
-                print("path is to an object\n")
-                parent_is_collection = False #
+                logging.info("path is to an object\n")
+                parent_is_collection = False
                 pass
 
 
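utils.check_unique_id is used above to guard against duplicate collection members. Its semantics, reconstructed only from this call site as an assumption, would look roughly like this:

import json

def check_unique_id(index_path: str, candidate_id: str) -> bool:
    # scan the parent collection's Members for the candidate @odata.id
    with open(index_path) as fd:
        collection = json.load(fd)
    members = collection.get("Members", [])
    return all(member.get("@odata.id") != candidate_id for member in members)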
@@ -240,7 +236,7 @@ def _update_object(self, payload: dict, replace: bool):
         Returns:
             str: id of the updated resource
         """
-        ## code that re-write into file
+        # code that re-write into file
         logging.info('BackendFS patch update called')
 
         # get ID and collection from payload
@@ -280,7 +276,6 @@ def _update_object(self, payload: dict, replace: bool):
             raise ResourceNotFound(resource_id)
 
         result: str = self.read(payload["@odata.id"])
-        # result:str = payload['@odata.id']
 
         return result
 
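_update_object takes a replace flag and ends by re-reading the object it wrote. A sketch of the patch-versus-replace file update that sits between those points, with the file layout and all names assumed:

import json

def update_object_file(index_path: str, payload: dict, replace: bool) -> dict:
    if replace:
        data = payload  # PUT semantics: the payload becomes the whole object
    else:
        with open(index_path) as fd:
            data = json.load(fd)
        data.update(payload)  # PATCH semantics: shallow-merge the changes
    with open(index_path, 'w') as fd:
        json.dump(data, fd, indent=4, sort_keys=True)
    return data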
@@ -297,7 +292,7 @@ def remove(self, path:str):
         Returns:
             str: confirmation string
         """
-        ## code that removes a file
+        # code that removes a file
         logging.info('BackendFS: remove called')
 
         length = len(self.redfish_root)
