
Commit 18f2fc8

Fix Collection naming bug and ignore improper Redfish formats (#49)
Collection names were being created improperly due to a bug in the core library plug-in. Likewise, if a JSON file returned during a ResourceCreated event is missing the @odata.type or @odata.id properties (a malformed Redfish object), the Sunfish handlers cannot parse it, leaving garbage in the Redfish resource trees. Added a check for both properties. This still does not guarantee that a JSON response is Redfish compliant, but it does prevent totally bogus JSON files from producing odd error messages and incorrect Redfish tree structures.
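
As a rough sketch of the guard this commit adds (the helper name below is hypothetical; the committed check is written inline in RedfishEventHandler.fetchResource, shown in the diff further down):

    # Hypothetical standalone version of the new minimal-validity check.
    def looks_like_redfish(payload: dict) -> bool:
        # Every well-formed Redfish resource carries both properties:
        # '@odata.id' locates it in the resource tree, '@odata.type' names its schema.
        return '@odata.id' in payload and '@odata.type' in payload

    agent_response = {"Name": "mystery-object"}  # malformed: no @odata properties
    if looks_like_redfish(agent_response):
        print(f"accepting {agent_response['@odata.id']}")
    else:
        print("discarding malformed Redfish object")  # this branch runs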
2 parents 3e66f68 + 4bd9199 · commit 18f2fc8

2 files changed: 48 additions & 60 deletions

sunfish_plugins/events_handlers/redfish/redfish_event_handler.py

Lines changed: 20 additions & 33 deletions
@@ -46,8 +46,6 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event
         response = response.json()

         ### Save agent registration
-        # connection_method_name = connectionMethodId.split('/')[-1]
-        # connection_method_name = connectionMethodId[:-len(connection_method_name)]
         event_handler.core.storage_backend.write(response)

         aggregation_source_id = str(uuid.uuid4())
@@ -85,8 +83,6 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, context
         # sunfishAliasDB contains renaming data, the alias xref array, the boundaryLink
         # data, and assorted flags that are used during upload renaming and final merge of
         # boundary components based on boundary links.
-
-        #
         #

         logger.info("New resource created")
@@ -149,7 +145,6 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context
         #
         logger.info("TriggerEvent method called")
         file_to_send = event['MessageArgs'][0] # relative Resource Path
-        #file_path = os.path.join(self.conf['redfish_root'], file_to_send)
         hostname = event['MessageArgs'][1] # target address
         destination = hostname + "/EventListener" # may match a Subscription object's 'Destination' property
         logger.debug(f"path of file_to_send is {file_to_send}")
@@ -299,7 +294,6 @@ def forward_event(self, list, payload):
         Returns:
             list: list of all the reachable subcribers for the event.
         """
-        # resp = 400

         for id in list:
             path = os.path.join(self.redfish_root, 'EventService', 'Subscriptions', id)
@@ -403,39 +397,24 @@ def handleNestedObject(self, obj):
         # this needs to be done on ALL agents, not just the one we just uploaded
         RedfishEventHandler.updateAllAgentsRedirectedLinks(self)

-        return visited #why not the 'fetched' list?
+        return visited

     def create_uploaded_object(self, path: str, payload: dict):
         # before to add the ID and to call the methods there should be the json validation

         # generate unique uuid if is not present
         if '@odata.id' not in payload and 'Id' not in payload:
             pass
-            #id = str(uuid.uuid4())
-            #to_add = {
-            #'Id': id,
-            #'@odata.id': os.path.join(path, id)
-            #}
-            #payload.update(to_add)
             raise exception(f"create_uploaded_object: no Redfish ID (@odata.id) found")

-        #object_type = self._get_type(payload)
         # we assume agents can upload collections, just not the root level collections
         # we will check for uploaded collections later
-        #if "Collection" in object_type:
-        #raise CollectionNotSupported()

         payload_to_write = payload

         try:
-            # 1. check the path target of the operation exists
-            # self.storage_backend.read(path)
-            # 2. we don't check the manager; we assume uploading agent is the manager unless it says otherwise
-            #agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.CREATE, path, payload=payload)
-            #if agent_response:
-            #payload_to_write = agent_response
-            # 3. should be no custom handler, this is not a POST, we upload the objects directly into the Redfish database
-            #self.objects_handler.dispatch(object_type, path, SunfishRequestType.CREATE, payload=payload)
+            # this would be another location to verify new object to be written
+            # meets Sunfish and Redfish requirements
             pass
         except ResourceNotFound:
             logger.error("The collection where the resource is to be created does not exist.")
@@ -445,7 +424,7 @@ def create_uploaded_object(self, path: str, payload: dict):
             # The object does not have a handler.
             logger.debug(f"The object {object_type} does not have a custom handler")
             pass
-        # 4. persist change in Sunfish tree
+        # persist change in Sunfish tree
        return self.storage_backend.write(payload_to_write)

     def get_aggregation_source(self, aggregation_source):
@@ -499,12 +478,21 @@ def fetchResource(self, obj_id, aggregation_source):

         if response.status_code == 200: # Agent must have returned this object
             redfish_obj = response.json()
-
-            # now rename if necessary and copy object into Sunfish inventory
-            redfish_obj = RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source)
-            if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]:
-                aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id'])
-            return redfish_obj
+            # however, it must be a minimally valid object
+            # This would be a great spot to insert a call to a Redfish schema validation function
+            # that could return a grading of this new redfish_obj: [PASS, FAIL, CAUTIONS]
+            # However, we are debugging not just code, but also new Redfish schema,
+            # so for now we just test for two required Redfish Properties to help weed out obviously incorrect responses
+            if '@odata.id' in redfish_obj and '@odata.type' in redfish_obj:
+
+                # now rename if necessary and copy object into Sunfish inventory
+                redfish_obj = RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source)
+                if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]:
+                    aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id'])
+                return redfish_obj
+            else:
+                # we treat this as an unsuccessful retrieval
+                return
         else: # Agent did not successfully return the obj_id sought
             # we still need to check the obj_id for an aliased parent segment
             # so we detect renamed navigation links
@@ -517,6 +505,7 @@ def createInspectedObject(self,redfish_obj, aggregation_source):
         if '@odata.id' in redfish_obj:
             obj_path = os.path.relpath(redfish_obj['@odata.id'], self.conf['redfish_root'])
         else:
+            # we shouldn't allow an improper object to be passed in, so let's take an exception
             raise PropertyNotFound(f"missing @odata.id in \n {json.dumps(redfish_obj, indent=2)}")

         file_path = os.path.join(self.conf['redfish_root'], obj_path)
@@ -601,7 +590,6 @@ def createInspectedObject(self,redfish_obj, aggregation_source):
         if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort":
             RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source)
         # is this new object a new fabric object with same fabric UUID as an existing fabric?
-        # RedfishEventHandler.checkForAliasedFabrics(self, redfish_obj, aggregation_source)
         RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj)

         return redfish_obj
@@ -856,7 +844,6 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB):
         # extract the Endpoint URI associated with this parent object
         host_obj = self.storage_backend.read(host_link)
         redirected_endpoint = host_obj["Links"]["Endpoints"][0]["@odata.id"]
-        #redirected_endpoint = "None" #for now, to test

         if "Links" not in agent_bp_obj:
             agent_bp_obj["Links"] = {}

sunfish_plugins/storage/file_system_backend/backend_FS.py

Lines changed: 28 additions & 27 deletions
@@ -66,22 +66,34 @@ def write(self, payload: dict):
         length = len(self.redfish_root)
         id = payload['@odata.id'][length:] # id without redfish.root (es. /redfish/v1/)
         parent_is_collection = True # default assumption
+        last_parent_to_exist=""

-        print(f"BackendFS.write called on {id}")
+        logging.info(f"BackendFS.write called on {id}")
         id = id.split('/')
         for index in range(2, len(id[1:])):
             to_check = os.path.join('/'.join(id[:index]), 'index.json')
             to_check = os.path.join(os.getcwd(), self.root, to_check)
-            print(f"BackendFS.write(): path to check: {to_check}")
+            logging.info(f"BackendFS.write(): path to check: {to_check}")
+            if os.path.exists(to_check) is True:
+                # capture this parent path as existing
+                last_parent_to_exist = to_check
             if os.path.exists(to_check) is False:
-                print("path does not exist\n")
-                raise ActionNotAllowed()
+                logging.info("path does not exist\n")
+                # nice to know, but NOT an error!
+                # Log the situation and continue
+
+
+
+        # This particular code block looks unfinished and its purpose/functionality is unknown.
+        # It looks as if part of this block was intended to fill in missing path elements and is redundant
+        # with code just below this block. This block also sets a flag that is never used. - more analysis required.
+        #
         '''
         with open(to_check, 'r') as data_json:
             data = json.load(data_json)
             data_json.close()
             if 'Collection' in data["@odata.type"]:
-                print("path is to a Collection\n")
+                logging.info("path is to a Collection\n")
                 members = data["Members"]
                 for x in members:
                     if x["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])):
@@ -96,13 +108,13 @@ def write(self, payload: dict):
                         present = True
                     else:
                         el["@odata.id"] = os.path.join(self.redfish_root, '/'.join(id[:index + 1]))
-                        print(f"BackendFS.write of {el['@odata.id']}")
+                        logging.info(f"BackendFS.write of {el['@odata.id']}")
                         with open(to_check, 'w') as data_json:
                             json.dump(data, data_json, indent=4, sort_keys=True)
                             data_json.close()

         '''
-        # we get here only if all grandparent objects exist
+        # we get here only if at least one grandparent objects exist
         last_element = len(id) - 1
         collection_type = id[last_element - 1]
         resource_id = id[last_element]
@@ -112,33 +124,23 @@ def write(self, payload: dict):
         for i in range(0, last_element - 1):
             full_collection = full_collection + id[i] + '/'

-        collection_type = os.path.join(full_collection, collection_type)
+        full_collection = os.path.join(full_collection, collection_type)

         collection_path = os.path.join(os.getcwd(), self.root,
-                                       collection_type) # collection_path .../Resources/[folder], collection_type = [folder]
-        parent_path = os.path.dirname(collection_path) # parent path .../Resources
+                                       full_collection)
+        parent_path = os.path.dirname(collection_path)

         #pdb.set_trace()
         # check if the directory of the Collection already exists
         if not os.path.exists(collection_path):
             # if parent directory doesn't exist, we assume it is a collection and create the collection
-            print(f"backendFS.write: making collection path directory")
+            logging.info(f"backendFS.write: making collection path directory")
             os.makedirs(collection_path)

             # the following line assumes the path element name dictates the collection type
             # it is more proper to examine the @odata.type property of the object being created!
             config = utils.generate_collection(collection_type)

-            # if the item to be written is managed by an agent, we want the collection containing it to also be marked
-            # accordingly. We do this only for collections to be created because we assume that if the collection is
-            # there already:
-            # a. The collection is a first level one that is managed by Sunfish
-            # b. The collection was previously created during an agent discovery process and therefore already marked
-            # if "Oem" in payload and "Sunfish_RM" in payload["Oem"] and len(id) > 2 :
-            #     if "Oem" not in config:
-            #         config["Oem"] = {}
-            #     config["Oem"]["Sunfish_RM"] = payload["Oem"]["Sunfish_RM"]
-
             ## write file Resources/[folder]/index.json
             with open(os.path.join(collection_path, "index.json"), "w") as fd:
                 fd.write(json.dumps(config, indent=4, sort_keys=True))
@@ -158,13 +160,13 @@ def write(self, payload: dict):
             parent_data = json.load(data_json)
             data_json.close()
             if 'Collection' in parent_data["@odata.type"]:
-                print("parent path is to a Collection\n")
+                logging.info("parent path is to a Collection\n")
                 if utils.check_unique_id(index_path, payload['@odata.id']) is False:
                     raise AlreadyExists(payload['@odata.id'])
                 pass
             else:
-                print("path is to an object\n")
-                parent_is_collection = False #
+                logging.info("path is to an object\n")
+                parent_is_collection = False
                 pass


@@ -234,7 +236,7 @@ def _update_object(self, payload: dict, replace: bool):
         Returns:
             str: id of the updated resource
         """
-        ## code that re-write into file
+        # code that re-write into file
         logging.info('BackendFS patch update called')

         # get ID and collection from payload
@@ -274,7 +276,6 @@ def _update_object(self, payload: dict, replace: bool):
             raise ResourceNotFound(resource_id)

         result: str = self.read(payload["@odata.id"])
-        # result:str = payload['@odata.id']

         return result

@@ -291,7 +292,7 @@ def remove(self, path:str):
         Returns:
             str: confirmation string
         """
-        ## code that removes a file
+        # code that removes a file
         logging.info('BackendFS: remove called')

         length = len(self.redfish_root)
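
An aside on the Collection-naming fix above: the old code in backend_FS.write reassigned collection_type to the full joined path before utils.generate_collection(collection_type) ran, so generated collections were named after their whole path rather than their type. A toy trace of the before/after behavior, under an assumed split URI (values illustrative only; the `id` list name mirrors the source):

    import os

    id = ['', 'Systems', 'AABB', 'Processors', 'CPU1']  # assumed split of an uploaded URI
    last_element = len(id) - 1
    collection_type = id[last_element - 1]              # 'Processors'

    full_collection = ''
    for i in range(0, last_element - 1):
        full_collection = full_collection + id[i] + '/'

    # old, buggy line: collection_type = os.path.join(full_collection, collection_type)
    # It clobbered collection_type with '/Systems/AABB/Processors', so the
    # collection generated from it inherited the whole path as its name.
    full_collection = os.path.join(full_collection, collection_type)  # fixed line

    print(collection_type)  # 'Processors': bare type, as generate_collection expects
    print(full_collection)  # '/Systems/AABB/Processors': used only to build the directory path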
