diff --git a/sunfish/lib/core.py b/sunfish/lib/core.py index 74a6606..27d7f79 100644 --- a/sunfish/lib/core.py +++ b/sunfish/lib/core.py @@ -232,6 +232,7 @@ def replace_object(self, path: str, payload: dict): str|exception: return the replaced resource or an exception in case of fault. """ object_type = self._get_type(payload, path=path) + payload_to_write = payload # we assume no changes can be done on collections if "Collection" in object_type: raise CollectionNotSupported() @@ -239,9 +240,12 @@ def replace_object(self, path: str, payload: dict): # 1. check the path target of the operation exists self.storage_backend.read(path) # 2. is needed first forward the request to the agent managing the object - self.objects_manager.forward_to_manager(SunfishRequestType.REPLACE, path, payload=payload) + #self.objects_manager.forward_to_manager(SunfishRequestType.REPLACE, path, payload=payload) + agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.REPLACE, path, payload=payload) + if agent_response: + payload_to_write = agent_response # 3. Execute any custom handler for this object type - self.objects_handler.dispatch(object_type, path, SunfishRequestType.REPLACE, payload=payload) + self.objects_handler.dispatch(object_type, path, SunfishRequestType.REPLACE, payload=payload_to_write) except ResourceNotFound: logger.error(logger.error(f"The resource to be replaced ({path}) does not exist.")) except AttributeError: @@ -249,7 +253,7 @@ def replace_object(self, path: str, payload: dict): logger.debug(f"The object {object_type} does not have a custom handler") pass # 4. persist change in Sunfish tree - return self.storage_backend.replace(payload) + return self.storage_backend.replace(payload_to_write) def patch_object(self, path: str, payload: dict): """Calls the correspondent patch function from the backend implementation. @@ -261,6 +265,7 @@ def patch_object(self, path: str, payload: dict): str|exception: return the updated resource or an exception in case of fault. """ # we assume no changes can be done on collections + payload_to_write = payload obj = self.storage_backend.read(path) object_type = self._get_type(obj, path=path) if "Collection" in object_type: @@ -269,7 +274,10 @@ def patch_object(self, path: str, payload: dict): # 1. check the path target of the operation exists self.storage_backend.read(path) # 2. is needed first forward the request to the agent managing the object - self.objects_manager.forward_to_manager(SunfishRequestType.PATCH, path, payload=payload) + #self.objects_manager.forward_to_manager(SunfishRequestType.PATCH, path, payload=payload) + agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.PATCH, path, payload=payload) + if agent_response: + payload_to_write = agent_response # 3. Execute any custom handler for this object type self.objects_handler.dispatch(object_type, path, SunfishRequestType.PATCH, payload=payload) except ResourceNotFound: @@ -280,7 +288,7 @@ def patch_object(self, path: str, payload: dict): pass # 4. persist change in Sunfish tree - return self.storage_backend.patch(path, payload) + return self.storage_backend.patch(path, payload_to_write) def delete_object(self, path: string): """Calls the correspondent remove function from the backend implementation. Checks that the path is valid. 
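For clarity, the sketch below restates the update flow that the core.py hunks above introduce for REPLACE and PATCH: the request is first forwarded to the agent managing the object, and if the agent answers with a (possibly amended) payload, that response rather than the caller's original payload is what gets persisted in the Sunfish tree. This is an editor's illustrative summary only, not part of the patch; the helper name is hypothetical, while forward_to_manager and its arguments match the code above.

    def _payload_to_persist(objects_manager, request_type, path, payload):
        # Default to the caller-supplied payload.
        payload_to_write = payload
        # Forward to the managing agent first; the agent may return a rewritten payload.
        agent_response = objects_manager.forward_to_manager(request_type, path, payload=payload)
        if agent_response:
            # Prefer the agent's version so agent-side changes are what Sunfish stores.
            payload_to_write = agent_response
        return payload_to_write

In the patched code this value is what reaches the storage backend: replace_object() ends with storage_backend.replace(payload_to_write) and patch_object() with storage_backend.patch(path, payload_to_write).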
diff --git a/sunfish/storage/backend_interface.py b/sunfish/storage/backend_interface.py index 7141952..b1ee9c3 100644 --- a/sunfish/storage/backend_interface.py +++ b/sunfish/storage/backend_interface.py @@ -1,4 +1,5 @@ # Copyright IBM Corp. 2023 +# Copyright Hewlett Packard Enterprise Development LP 2024 # This software is available to you under a BSD 3-Clause License. # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE @@ -22,4 +23,8 @@ def patch(): @abstractmethod def remove(): - pass \ No newline at end of file + pass + + @abstractmethod + def reset_resources(): + pass diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index a4a2f43..3cb258b 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -1,4 +1,5 @@ # Copyright IBM Corp. 2023 +# Copyright Hewlett Packard Enterprise Development LP 2024 # This software is available to you under a BSD 3-Clause License. # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE import json @@ -6,6 +7,9 @@ import os import uuid import warnings +import shutil +from uuid import uuid4 +import pdb import requests from sunfish.events.event_handler_interface import EventHandlerInterface @@ -13,6 +17,7 @@ from sunfish.lib.exceptions import * logger = logging.getLogger("RedfishEventHandler") +logging.basicConfig(level=logging.DEBUG) class RedfishEventHandlersTable: @@ -22,8 +27,8 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event # Fabric Agents are modelled as AggregationSource objects (RedFish v2023.1 at the time of writing this comment) # Registration will happen with the OFMF receiving a and event with MessageId: AggregationSourceDiscovered # The arguments of the event message are: - # - Arg1: "Redfish" - # - Arg2: "agent_ip:port" + # - Arg0: "Redfish" + # - Arg1: "agent_ip:port" # I am also assuming that the agent name to be used is contained in the OriginOfCondifiton field of the event as in the below example: # { # "OriginOfCondition: [ @@ -48,7 +53,7 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event aggregation_source_id = str(uuid.uuid4()) aggregation_source_template = { "@odata.type": "#AggregationSource.v1_2_.AggregationSource", - "@odata.id": f"{event_handler.core.conf['redfish_root']}/AggregationService/AggregationSources/{aggregation_source_id}", + "@odata.id": f"{event_handler.core.conf['redfish_root']}AggregationService/AggregationSources/{aggregation_source_id}", "HostName": hostname, "Id": aggregation_source_id, "Links": { @@ -72,42 +77,132 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event @classmethod def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, context: str): + # incoming context (an aggregation_source ID) comes from event sender + #pdb.set_trace() if context == "": raise PropertyNotFound("Missing agent context in ResourceCreated event") + # put the global definition and initial loading of sunfishAliasDB dictionary here + # sunfishAliasDB contains renaming data, the alias xref array, the boundaryLink + # data, and assorted flags that are used during upload renaming and final merge of + # boundary components based on boundary links. 
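The handlers added below keep the alias data in a private URI_aliases.json file under the conf["backend_conf"]["fs_private"] directory. The layout sketched here is inferred from how that file is read and written by updateSunfishAliasDB, renameUploadedObject, track_boundary_port and match_boundary_port; the agent IDs, URIs and port IDs shown are hypothetical placeholders, not values from the patch.

    # Assumed shape of URI_aliases.json, shown as a Python literal; all values are examples only.
    uri_aliasDB = {
        "Agents_xref_URIs": {
            # keyed by the AggregationSource Id of the owning agent
            "5c4a-example-agent-id": {
                # agent-proposed URI -> Sunfish (aliased) URI
                "aliases": {
                    "/redfish/v1/Fabrics/CXL": "/redfish/v1/Fabrics/Sunfish_5c4a_CXL"
                },
                # one entry per CXL boundary port uploaded by this agent
                "boundaryPorts": {
                    "/redfish/v1/Fabrics/CXL/Switches/Sw1/Ports/P1": {
                        "LocalLinkPartnerId": "partner-A",
                        "LocalPortId": "P1",
                        "RemoteLinkPartnerId": "partner-B",
                        "RemotePortId": "P7",
                        # filled in by match_boundary_port() once the peer port is found
                        "PeerPortURI": "/redfish/v1/Fabrics/Other/Switches/Sw2/Ports/P7",
                        # original agent placeholder link, saved by the redirect helpers
                        # (which may also record AgentPeerSwitchURI or AgentPeerEndpointURI)
                        "AgentPeerPortURI": "/redfish/v1/Fabrics/CXL/Switches/Ext/Ports/P7"
                    }
                }
            }
        },
        "Sunfish_xref_URIs": {
            # Sunfish (aliased) URI -> list of agent URIs it stands in for
            "aliases": {
                "/redfish/v1/Fabrics/Sunfish_5c4a_CXL": ["/redfish/v1/Fabrics/CXL"]
            }
        }
    }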
+ + # + # logger.info("New resource created") - id = event['OriginOfCondition']['@odata.id'] # /redfish/v1/Fabrics/CXL - aggregation_source = event_handler.core.storage_backend.read( - os.path.join(event_handler.core.conf["redfish_root"], + id = event['OriginOfCondition']['@odata.id'] # ex: /redfish/v1/Fabrics/CXL + logger.info(f"aggregation_source's redfish URI: {id}") + # must have an aggregation_source object to assign as owner of new resource + agg_src_path = os.path.join(os.getcwd(), event_handler.core.conf["backend_conf"]["fs_root"], "AggregationService", "AggregationSources", context) - ) + if os.path.exists(agg_src_path): + aggregation_source = event_handler.core.storage_backend.read(agg_src_path) + else: + raise PropertyNotFound("Cannot find aggregation source; file does not exist") + # fetch the actual resource to be created from agent hostname = aggregation_source["HostName"] - response = requests.get(f"{hostname}/{id}") + if response.status_code != 200: - raise Exception("Cannot find ConnectionMethod") + raise ResourceNotFound("Aggregation source read from Agent failed") response = response.json() - - add_aggregation_source_reference(response, aggregation_source) + logger.info(f"new resource is \n") + logger.info(json.dumps(response, indent=4)) # here we are assuming that we are getting a fully populated redfish - # object from the agent. + # object from the agent. Add real tests here! if "@odata.id" not in response: + # should never hit this! logger.warning(f"Resource {id} did not have @odata.id set when retrieved from Agent. Initializing its value with {id}") response["odata.id"] = id - event_handler.core.storage_backend.write(response) - - RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) + # New resource should not exist in Sunfish inventory + length = len(event_handler.core.conf["redfish_root"]) + resource = response["@odata.id"][length:] + fs_full_path = os.path.join(os.getcwd(), event_handler.core.conf["backend_conf"]["fs_root"], + resource, 'index.json') + if not os.path.exists(fs_full_path): + RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) + else: + logger.warning(f"resource to create: {id} already exists.") + # could be a second agent with naming conflicts, or same agent with duplicate + # still run the inspection process on it to find cause of warning + RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) + + # patch the aggregation_source object in storage with all the new resources found event_handler.core.storage_backend.patch(id, aggregation_source) + return 200 + @classmethod + def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context: str): + ### + # Receipt of this event will cause the core library to retrieve and send a specific event to a specific target + # This will happen upon the API receiving an event with MessageId: TriggerEvent + # The arguments of the event message are: + # - Arg0: "EventDescriptor" --relative OS Filesystem path from core library application home directory + # - Arg1: "target_IP:port" + # there is no protection on the inadvertant receipt of this event + # + logger.info("TriggerEvent method called") + file_to_send = event['MessageArgs'][0] # relative Resource Path + #file_path = os.path.join(self.conf['redfish_root'], file_to_send) + hostname = event['MessageArgs'][1] # target address + destination = hostname + "/EventListener" # may match a Subscription object's 'Destination' property + logger.debug(f"path of 
file_to_send is {file_to_send}") + #pdb.set_trace() + try: + if os.path.exists(file_to_send): + with open(file_to_send, 'r') as data_json: + event_to_send = json.load(data_json) + data_json.close() + + logger.debug("found the event file") + + if event_to_send["Context"] == "": + logger.debug("no context in template event") + # don't fill it in, send the NULL + pass + elif event_to_send["Context"] == "None": + logger.debug("template event uses subscriber assigned Context") + # check if the Destination for this event is a registered subscriber + # use as "Context" of this the event_to_send, or use NULL if not found + event_to_send["Context"] = RedfishEventHandler.find_subscriber_context(event_handler.core, destination) + pass + + logger.debug(f"event_to_send\n {event_to_send}" ) + try: + # send the event as a POST to the EventListener + response = requests.post(destination,json=event_to_send) + if response.status_code != 200: + logger.debug(f"Destination returned code {response.status_code}") + return response + else: + logger.info(f"TriggerEvents Succeeded: code {response.status_code}") + return response + except Exception: + raise Exception(f"Event forwarding to destination {destination} failed.") + response = 500 + return response + + else: + logger.error(f"file not found: {file_to_send} ") + response = 404 + return response + except Exception: + raise Exception("TriggerEvents Failed") + resp = 500 + return resp + + + class RedfishEventHandler(EventHandlerInterface): dispatch_table = { "AggregationSourceDiscovered": RedfishEventHandlersTable.AggregationSourceDiscovered, - "ResourceCreated": RedfishEventHandlersTable.ResourceCreated + "ResourceCreated": RedfishEventHandlersTable.ResourceCreated, + "TriggerEvent": RedfishEventHandlersTable.TriggerEvent } def __init__(self, core): @@ -119,6 +214,7 @@ def __init__(self, core): self.core = core self.redfish_root = core.conf["redfish_root"] self.fs_root = core.conf["backend_conf"]["fs_root"] + self.fs_SunfishPrivate = core.conf["backend_conf"]["fs_private"] self.subscribers_root = core.conf["backend_conf"]["subscribers_root"] @classmethod def dispatch(cls, message_id: str, event_handler: EventHandlerInterface, event: dict, context: str): @@ -207,7 +303,6 @@ def forward_event(self, list, payload): path = os.path.join(self.redfish_root, 'EventService', 'Subscriptions', id) try: data = self.core.storage_backend.read(path) - # print('send to: ', data["Id"]) resp = requests.post(data['Destination'], json=payload) resp.raise_for_status() except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e: @@ -232,10 +327,35 @@ def check_subdirs(self, origin): return to_forward + def find_subscriber_context(self, destination): + # look up the subscriber's "Context" for the given event Destination + #pdb.set_trace() + context = "" + try: + subscribers_list = self.storage_backend.read( + os.path.join(self.conf["redfish_root"], + "EventService", "Subscriptions") + ) + logger.debug(f"subscribers: {subscribers_list}") + for member in subscribers_list['Members']: + logger.debug(f"checking {member}") + subscriber = self.storage_backend.read(member["@odata.id"]) + if subscriber['Destination'] == destination: + context=subscriber['Context'] + logger.info(f"Found matching Destination in {member}") + + except Exception: + logger.info(f"failed to find a matching Destination") + + return context + + def bfsInspection(self, node, aggregation_source): queue = [] visited = [] fetched = [] + notfound = [] + uploaded = [] visited.append(node['@odata.id']) 
queue.append(node['@odata.id']) @@ -248,28 +368,83 @@ def handleNestedObject(self, obj): for key,value in obj.items(): if key == '@odata.id': RedfishEventHandler.handleEntryIfNotVisited(self, value, visited, queue) - elif type(value) == list or type(value) == dict: - handleNestedObject(self, value) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + handleNestedObject(self, value) # need to ignore Sunfish_RM paths; they are wrong namespace while queue: queue = sorted(queue) id = queue.pop(0) - redfish_obj = RedfishEventHandler.fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched) + redfish_obj = RedfishEventHandler.fetchResourceAndTree(self, id, aggregation_source, \ + visited, queue, fetched) + if redfish_obj is None: # we failed to locate it in aggregation_source + notfound.append(id) if redfish_obj is None or type(redfish_obj) != dict: logger.info(f"Resource - {id} - not available") continue for key, val in redfish_obj.items(): - if key == 'Links': - if type(val)==dict or type(val)==list: - handleNestedObject(self, val) if key == '@odata.id': - RedfishEventHandler.handleEntryIfNotVisited(self, val, visited, queue) pass - if type(val) == list or type(val) == dict: + # keep extracting nested @odata.id references from the currently fetched object + elif type(val) == list or type(val) == dict: handleNestedObject(self, val) - return visited + logger.info("\n\nattempted to fetch the following URIs:\n") + logger.info(json.dumps(sorted(fetched),indent = 4)) + logger.info("\n\nAgent did not return objects for the following URIs:\n") + logger.info(json.dumps(sorted(notfound),indent = 4)) + + # now need to revisit all uploaded objects and update any links renamed after + # the uploaded object was written + RedfishEventHandler.updateAllAliasedLinks(self,aggregation_source) + # now we need to re-direct any boundary port link references + # this needs to be done on ALL agents, not just the one we just uploaded + RedfishEventHandler.updateAllAgentsRedirectedLinks(self) + + return visited #why not the 'fetched' list? + + def create_uploaded_object(self, path: str, payload: dict): + # before to add the ID and to call the methods there should be the json validation + + # generate unique uuid if is not present + if '@odata.id' not in payload and 'Id' not in payload: + pass + #id = str(uuid.uuid4()) + #to_add = { + #'Id': id, + #'@odata.id': os.path.join(path, id) + #} + #payload.update(to_add) + raise exception(f"create_uploaded_object: no Redfish ID (@odata.id) found") + + #object_type = self._get_type(payload) + # we assume agents can upload collections, just not the root level collections + # we will check for uploaded collections later + #if "Collection" in object_type: + #raise CollectionNotSupported() + + payload_to_write = payload + + try: + # 1. check the path target of the operation exists + # self.storage_backend.read(path) + # 2. we don't check the manager; we assume uploading agent is the manager unless it says otherwise + #agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.CREATE, path, payload=payload) + #if agent_response: + #payload_to_write = agent_response + # 3. 
should be no custom handler, this is not a POST, we upload the objects directly into the Redfish database + #self.objects_handler.dispatch(object_type, path, SunfishRequestType.CREATE, payload=payload) + pass + except ResourceNotFound: + logger.error("The collection where the resource is to be created does not exist.") + except AgentForwardingFailure as e: + raise e + except AttributeError: + # The object does not have a handler. + logger.debug(f"The object {object_type} does not have a custom handler") + pass + # 4. persist change in Sunfish tree + return self.storage_backend.write(payload_to_write) def get_aggregation_source(self, aggregation_source): try: @@ -298,31 +473,44 @@ def fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched): for node_position in range(4, len(path_nodes) - 1): redfish_path = f'/redfish/v1/{"/".join(path_nodes[3:node_position + 1])}' logger.info(f"Checking redfish path: {redfish_path}") - if redfish_path not in visited: + if redfish_path not in visited: need_parent_prefetch = True logger.info(f"Inspect redfish path: {redfish_path}") queue.append(redfish_path) visited.append(redfish_path) - if need_parent_prefetch: # requeue + if need_parent_prefetch: # requeue this id and return 'None' queue.append(id) - else: + else: # all grand-parent objects have been visited + # go get this object from the aggregation_source + # fetchResource() will also create the Sunfish copy, if appropriate redfish_obj = RedfishEventHandler.fetchResource(self, id, aggregation_source) fetched.append(id) return redfish_obj def fetchResource(self, obj_id, aggregation_source): + # only called if all grand-parent objects have been put in queue, sorted, inspected, and already fetched. + # The parent object, if not a collection, will also have already been fetched + # this routine will also call create and/or merge the object into Sunfish database resource_endpoint = aggregation_source["HostName"] + obj_id logger.info(f"fetch: {resource_endpoint}") response = requests.get(resource_endpoint) - if response.status_code == 200: + if response.status_code == 200: # Agent must have returned this object redfish_obj = response.json() - RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) + # now rename if necessary and copy object into Sunfish inventory + redfish_obj = RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]: aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id']) return redfish_obj - + else: # Agent did not successfully return the obj_id sought + # we still need to check the obj_id for an aliased parent segment + # so we detect renamed navigation links + sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, obj_id, aggregation_source) + if obj_id != sunfish_aliased_URI: + RedfishEventHandler.updateSunfishAliasDB(self, sunfish_aliased_URI, obj_id, aggregation_source) + + def createInspectedObject(self,redfish_obj, aggregation_source): if '@odata.id' in redfish_obj: obj_path = os.path.relpath(redfish_obj['@odata.id'], self.conf['redfish_root']) @@ -330,25 +518,728 @@ def createInspectedObject(self,redfish_obj, aggregation_source): raise PropertyNotFound(f"missing @odata.id in \n {json.dumps(redfish_obj, indent=2)}") file_path = os.path.join(self.conf['redfish_root'], obj_path) + logger.debug(f"try creating agent-named object: {file_path}") - if 'Collection' not in redfish_obj['@odata.type']: - 
try: - if self.get_object(file_path) == redfish_obj: + agent_redfish_URI = redfish_obj['@odata.id'] + sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, agent_redfish_URI, aggregation_source) + # @odata.id is the Agent-proposed path name, but we need to search for the Sunfish (aliased) name. + # becomes part of xlateToSunfishObj(self, agent_obj,aggregation_source) -> translated_agent_obj + # if Sunfish has aliased the object URI, we need to update the object before we write it! + if agent_redfish_URI != sunfish_aliased_URI: + redfish_obj['@odata.id'] = sunfish_aliased_URI + RedfishEventHandler.updateSunfishAliasDB(self, sunfish_aliased_URI, agent_redfish_URI, aggregation_source) + if 'Id' in redfish_obj: + if redfish_obj['Id'] == agent_redfish_URI.split("/")[-1]: + redfish_obj['Id'] = sunfish_aliased_URI.split("/")[-1] + logger.debug(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") + if 'Collection' in redfish_obj['@odata.type']: + logger.debug("This is a collection, ignore it until we need it") + pass + else: + # use Sunfish (aliased) paths for conflict testing if it exists + obj_path = os.path.relpath(sunfish_aliased_URI, self.conf['redfish_root']) + fs_full_path = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_root"], obj_path, 'index.json') + file_path = os.path.join(self.conf['redfish_root'], obj_path) + + if os.path.exists(fs_full_path): + uploading_agent_uri= aggregation_source["@odata.id"] + existing_obj = self.get_object(file_path) + modified_existing_obj = False + existing_agent_uri = existing_obj["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] + logger.debug(f"managingAgent of Sunfish {obj_path} is {uploading_agent_uri}") + if existing_agent_uri == uploading_agent_uri: + # reject this duplicate posting of the object from same agent + # note we don't update the object + warnings.warn('Duplicate Resource found, ignored') pass - elif self.get_object(file_path) != redfish_obj: - warnings.warn('Resource state changed') - except ResourceNotFound: + else: + # is object a Fabric? + obj_type = redfish_obj["@odata.type"].split('.')[0] + obj_type = obj_type.replace("#","") # #Fabric -> Fabric + + if obj_type == 'Fabric': + # is the conflicting Fabric object the same Fabric Object? 
+ if "UUID" in redfish_obj and "UUID" in existing_obj: + if redfish_obj['UUID'] == existing_obj['UUID']: + # assume new Fabric object is the same as existing one + # because aggregation_sources are cooperating + # So, do not post this newly uploaded copy + # However, do update existing object with new 'sharer agent' + modified_existing_obj =RedfishEventHandler.updateIfMergedFabrics(self,redfish_obj, \ + uploading_agent_uri, existing_obj) + if modified_existing_obj: + self.storage_backend.replace(existing_obj) + logger.info(f"----- updated (replaced) existing fabric object") + else: + # different fabrics, just rename the new one + redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) + add_aggregation_source_reference(redfish_obj, aggregation_source) + logger.info(f"creating object: {file_path}") + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + else: + # assume different fabrics, just rename the new one + redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) + add_aggregation_source_reference(redfish_obj, aggregation_source) + logger.info(f"creating object: {file_path}") + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + else: + # we have a simple name conflict on a non-Fabric object + # find new name, build xref, check boundary ports and create the new object + redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) + add_aggregation_source_reference(redfish_obj, aggregation_source) + logger.info(f"creating object: {file_path}") + if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source) + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + + + else: # assume new object, create it and its parent collection if needed add_aggregation_source_reference(redfish_obj, aggregation_source) - self.create_object(file_path, redfish_obj) + logger.info(f"creating object: {file_path}") + if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source) + # is this new object a new fabric object with same fabric UUID as an existing fabric? 
+ # RedfishEventHandler.checkForAliasedFabrics(self, redfish_obj, aggregation_source) + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + + return redfish_obj + + def xlateToSunfishPath(self,agent_path, aggregation_source): + # redfish_obj uses agent namespace + # aggregation_source is an object in the Sunfish namespace + # will eventually replace file read & load of aliasDB with aliasDB passed in as arg + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + logger.error(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + agentGiven_segments = agent_path.split("/") + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + logger.debug(f"agent id: {owning_agent_id}") + # check if owning_agent has any aliases assigned + if owning_agent_id in uri_aliasDB["Agents_xref_URIs"]: + logger.debug(f"xlating Agent path : {agent_path}") + agentFinal_obj_path = "" + for i in range(1,len(agentGiven_segments)): + agentFinal_obj_path=agentFinal_obj_path +"/"+ agentGiven_segments[i] + # test this path segment + if agentFinal_obj_path in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"]: + # need to replace agent_path built to this point with sunfish alias + sunfishAliasPath = uri_aliasDB["Agents_xref_URIs"][owning_agent_id] \ + ["aliases"][agentFinal_obj_path] + agentFinal_obj_path = sunfishAliasPath + logger.debug(f"aliased path is {agentFinal_obj_path}") + # next segment + agent_path = agentFinal_obj_path + return agent_path + + def updateAllAliasedLinks(self,aggregation_source): + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + logger.error(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + logger.debug(f"updating all objects for : {owning_agent_id}") + + agent_uploads=[] + # for every aggregation_source with aliased links: + if owning_agent_id in uri_aliasDB['Agents_xref_URIs']: + # grab the k,v aliases structure and the list of URIs for owned objects + if 'aliases' in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]: + agent_aliases = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['aliases'] + agent_uploads = aggregation_source["Links"]["ResourcesAccessed"] + + # update all the objects + for upload_obj_URI in agent_uploads: + logger.debug(f"updating links in obj: {upload_obj_URI}") + RedfishEventHandler.updateObjectAliasedLinks(self, upload_obj_URI, agent_aliases) + + return + + def updateObjectAliasedLinks(self, object_URI, agent_aliases): + + def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): + #pdb.set_trace() + nestedPaths = [] + if type(obj) == list: + i = 0; + for entry in obj: + if type(entry) == list or type(entry) == dict: + nestedPaths.extend( findNestedURIs(self, URI_to_match, URI_to_sub, entry, path_to_nested_URI+"["+str(i)+"]")) + else: + i=i+1 + if type(obj) == dict: + for key,value in obj.items(): + if key == '@odata.id'and path_to_nested_URI != "": + # check @odata.id: value for an alias + if value == URI_to_match: + logger.info(f"modifying {value} 
to {URI_to_sub}") + obj[key] = URI_to_sub + nestedPaths.append(path_to_nested_URI) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + nestedPaths.extend(findNestedURIs(self, URI_to_match, URI_to_sub, value, path_to_nested_URI+"["+key+"]" )) + return nestedPaths + + try: + sunfish_obj = self.storage_backend.read( object_URI) + obj_type = redfish_obj["@odata.type"].split('.')[0] + obj_type = obj_type.split("/")[-1] + obj_type = obj_type.replace("#","") # #Évent -> Event + # should not do aliasing on the members of a Collection + # since the members list should contain both original and aliased URIs + if "Collection" not in obj_type : + aliasedNestedPaths=[] + obj_modified = False + for agent_URI, sunfish_URI in agent_aliases.items(): + # find all the references to the aliased agent_URI and replace it + path_to_nested_URI="" + aliasedNestedPaths= findNestedURIs(self, agent_URI, sunfish_URI, sunfish_obj, path_to_nested_URI ) + if aliasedNestedPaths: + obj_modified = True + if obj_modified: + logger.info(json.dumps(sunfish_obj, indent=2)) + self.storage_backend.replace(sunfish_obj) + + except: + logger.error(f"could not update links in object {object_URI}") + + def updateAllAgentsRedirectedLinks(self ): + # after renaming all links, need to redirect the placeholder links + # will eventually replace file read & load of aliasDB with aliasDB passed in as arg + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + logger.error(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + modified_aliasDB = False + for owning_agent_id in uri_aliasDB['Agents_xref_URIs']: + logger.debug(f"redirecting placeholder links in all boundary ports for : {owning_agent_id}") + if owning_agent_id in uri_aliasDB['Agents_xref_URIs']: + if 'boundaryPorts' in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]: + for agent_bp_URI in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts']: + agent_bp_obj = self.storage_backend.read(agent_bp_URI) + logger.debug(f"------ redirecting links for {agent_bp_URI}") + # check PortType + if "PortType" in agent_bp_obj and agent_bp_obj["PortType"] == "InterswitchPort": + # We are assuming if one end of link is ISL, both must be + if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: + RedfishEventHandler.redirectInterswitchLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) + modified_aliasDB = True + # need to replace the update object and re-save the uri_aliasDB + self.storage_backend.replace(agent_bp_obj) + else: + logger.info(f"------ PeerPortURI NOT found") + pass + + elif "PortType" in agent_bp_obj and (agent_bp_obj["PortType"] == "UpstreamPort" ): + if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: + RedfishEventHandler.redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) + modified_aliasDB = True + # need to replace the update object and re-save the uri_aliasDB + self.storage_backend.replace(agent_bp_obj) + else: + logger.info(f"------ PeerPortURI NOT found") + pass + + elif "PortType" in agent_bp_obj and (agent_bp_obj["PortType"] == "DownstreamPort" ): + if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: + 
RedfishEventHandler.redirectDownstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) + modified_aliasDB = True + # need to replace the update object and re-save the uri_aliasDB + self.storage_backend.replace(agent_bp_obj) + else: + logger.info(f"------ PeerPortURI NOT found") + pass + + + + if modified_aliasDB: + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + return + + + def redirectInterswitchLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): + + + logger.info(f"redirecting Interswitch ConnectedSwitches and ConnectedSwitchPorts") + + agent_bp_URI = agent_bp_obj["@odata.id"] + redirected_CSP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["PeerPortURI"] + switch_uri_segments = redirected_CSP.split("/")[0:-2] + redirected_switch_link="" + for i in range(1,len(switch_uri_segments)): + redirected_switch_link = redirected_switch_link +"/" + switch_uri_segments[i] + logger.debug(f"------ redirected_switch_link is {redirected_switch_link}") + + if "Links" not in agent_bp_obj: + agent_bp_obj["Links"] = {} + if "ConnectedSwitchPorts" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitchPorts"]=[] + if "ConnectedSwitches" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitches"]=[] + if len(agent_bp_obj["Links"]["ConnectedSwitchPorts"]) >1: + logger.error(f"Interswitch Link claims >1 ConnectedSwitchPorts") + else: + if agent_bp_obj["Links"]["ConnectedSwitchPorts"]: + agent_placeholder_CSP = agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] = redirected_CSP + logger.info(f"redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CSP + else: # no placeholder links in ConnectedSwitchPorts array + agent_bp_obj["Links"]["ConnectedSwitchPorts"].append({"@odata.id":redirected_CSP}) + logger.info(f"created ConnectedSwitchPort to {redirected_CSP}") + + + if len(agent_bp_obj["Links"]["ConnectedSwitches"]) >1: + logger.error(f"Interswitch Link claims >1 ConnectedSwitches") else: - logger.debug("This is a collection") + if agent_bp_obj["Links"]["ConnectedSwitches"]: + agent_placeholder_switch_link = agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] = redirected_switch_link + logger.info(f"redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerSwitchURI"] = agent_placeholder_switch_link + else: # no placeholder links in ConnectedSwitches array + agent_bp_obj["Links"]["ConnectedSwitches"].append({"@odata.id":redirected_switch_link}) + logger.info(f"created ConnectedSwitches to {redirected_switch_link}") + + + def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): + + logger.info(f"redirecting UpstreamPort AssociatedEndpoints and ConnectedPorts") + + agent_bp_URI = agent_bp_obj["@odata.id"] + redirected_CP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["PeerPortURI"] + # find the parent (assumed to be a host) obj of this peer port + host_uri_segments = 
redirected_CP.split("/")[0:-2] + host_link="" + for i in range(1,len(host_uri_segments)): + host_link = host_link +"/" + host_uri_segments[i] + logger.debug(f"host_link is {host_link}") + + # extract the Endpoint URI associated with this parent object + host_obj = self.storage_backend.read(host_link) + redirected_endpoint = host_obj["Links"]["Endpoints"][0]["@odata.id"] + #redirected_endpoint = "None" #for now, to test + + if "Links" not in agent_bp_obj: + agent_bp_obj["Links"] = {} + if "ConnectedPorts" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedPorts"]=[] + if "AssociatedEndpoints" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["AssociatedEndpoints"]=[] + + if len(agent_bp_obj["Links"]["ConnectedPorts"]) >1: + logger.error(f"UpstreamPort Link claims >1 ConnectedPorts") + else: + if agent_bp_obj["Links"]["ConnectedPorts"]: + agent_placeholder_CP = agent_bp_obj["Links"]["ConnectedPorts"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedPorts"][0]["@odata.id"] = redirected_CP + logger.info(f"redirected {agent_placeholder_CP} to \n------ {redirected_CP}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CP + else: # no placeholder links in ConnectedSwitchPorts array + agent_bp_obj["Links"]["ConnectedPorts"].append({"@odata.id":redirected_CP}) + logger.info(f"created ConnectedPorts to {redirected_CP}") + + + if len(agent_bp_obj["Links"]["AssociatedEndpoints"]) >1: + logger.error(f"UpstreamPort Link claims >1 AssociatedEndpoints") + else: + if agent_bp_obj["Links"]["AssociatedEndpoints"]: + agent_placeholder_endpoint = agent_bp_obj["Links"]["AssociatedEndpoints"][0]["@odata.id"] + agent_bp_obj["Links"]["AssociatedEndpoints"][0]["@odata.id"] = redirected_endpoint + logger.info(f"redirected {agent_placeholder_endpoint} to \n------ {redirected_endpoint}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerEndpointURI"] = agent_placeholder_endpoint + else: # no placeholder links in AssociatedEndpoints array + agent_bp_obj["Links"]["AssociatedEndpoints"].append({"@odata.id":redirected_endpoint}) + logger.info(f"created AssociatedEndpoints to {redirected_endpoint}") + + def redirectDownstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): + + logger.info(f"redirecting Downstream ConnectedSwitches and ConnectedSwitchPorts") + + agent_bp_URI = agent_bp_obj["@odata.id"] + redirected_CSP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["PeerPortURI"] + switch_uri_segments = redirected_CSP.split("/")[0:-2] + redirected_switch_link="" + for i in range(1,len(switch_uri_segments)): + redirected_switch_link = redirected_switch_link +"/" + switch_uri_segments[i] + logger.info(f"------ redirected_switch_link is {redirected_switch_link}") + + if "Links" not in agent_bp_obj: + agent_bp_obj["Links"] = {} + if "ConnectedSwitchPorts" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitchPorts"]=[] + if "ConnectedSwitches" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitches"]=[] + if len(agent_bp_obj["Links"]["ConnectedSwitchPorts"]) >1: + logger.error(f"Downstream Link claims >1 ConnectedSwitchPorts") + else: + if agent_bp_obj["Links"]["ConnectedSwitchPorts"]: + agent_placeholder_CSP = agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] + 
agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] = redirected_CSP + logger.info(f"redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CSP + else: # no placeholder links in ConnectedSwitchPorts array + agent_bp_obj["Links"]["ConnectedSwitchPorts"].append({"@odata.id":redirected_CSP}) + logger.info(f"created ConnectedSwitchPort to {redirected_CSP}") + + + if len(agent_bp_obj["Links"]["ConnectedSwitches"]) >1: + logger.error(f"Downstream Link claims >1 ConnectedSwitches") + else: + if agent_bp_obj["Links"]["ConnectedSwitches"]: + agent_placeholder_switch_link = agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] = redirected_switch_link + logger.info(f"redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerSwitchURI"] = agent_placeholder_switch_link + else: # no placeholder links in ConnectedSwitches array + agent_bp_obj["Links"]["ConnectedSwitches"].append({"@odata.id":redirected_switch_link}) + logger.info(f"created ConnectedSwitches to {redirected_switch_link}") + + + + def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + logger.error(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + logger.debug(f"updating aliases for : {owning_agent_id}") + if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agent_URI]=sunfish_URI + else: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agent_URI]=sunfish_URI + + if sunfish_URI not in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfish_URI] = [] + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfish_URI].append(agent_URI) + else: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfish_URI].append(agent_URI) + + # now need to write aliasDB back to file + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + + return uri_aliasDB + + def updateIfMergedFabrics(self,redfish_obj, uploading_agent_uri, sunfish_obj ): + # both objects must be Fabric objects + # both objects must have Sunfish_RM property + logger.info(f"----- merged fabric processed") + did_a_merge = True + # update sunfish_obj with agent_uri of redfish_obj as a sharer + new_obj_owner={"@odata.id":uploading_agent_uri} + + if "FabricSharedWith" in sunfish_obj["Oem"]["Sunfish_RM"]: + sunfish_obj["Oem"]["Sunfish_RM"]["FabricSharedWith"].append(new_obj_owner) + else: + sunfish_obj["Oem"]["Sunfish_RM"]["FabricSharedWith"] = [] + sunfish_obj["Oem"]["Sunfish_RM"]["FabricSharedWith"].append(new_obj_owner) + logger.debug(f"sunfish merged fabric 
object: {json.dumps(sunfish_obj,indent=2)}") + + return did_a_merge + + def checkForAliasedFabrics(self, redfish_obj, aggregation_source): + found_an_aliased_fabric = False + obj_type = redfish_obj["@odata.type"].split('.')[0] + obj_type = obj_type.replace("#","") # #Évent -> Event + if obj_type == "Fabric": + # TODO: + # check all existing Fabrics + # look for Fabric UUID in existing Fabrics + # compare UUIDs + if "UUID" in redfish_obj and "UUID" in sunfish_obj: + if redfish_obj['UUID'] == sunfish_obj['UUID']: + did_a_merge = True + # update both redfish_obj and sunfish_obj with Fabric xref in Sunfish_RM + new_obj_fabric_xref={"@odata.id":sunfish_obj["@odata.id"]} + existing_obj_fabric_xref={"@odata.id":redfish_obj["@odata.id"]} + if "MergedFabrics" in redfish_obj["Oem"]["Sunfish_RM"]: + redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(new_obj_fabric_xref) + else: + redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"] = [] + redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(new_obj_fabric_xref) + + if "MergedFabrics" in sunfish_obj["Oem"]["Sunfish_RM"]: + sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(existing_obj_fabric_xref) + else: + sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"] = [] + sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(existing_obj_fabric_xref) + logger.debug(f"sunfish merged fabric object: {json.dumps(sunfish_obj,indent=2)}") + + else: + logger.debug(f"----- not same fabrics") + + return found_an_aliased_fabric + + def renameUploadedObject(self,redfish_obj, aggregation_source): + # redfish_obj uses agent namespace + # aggregation_source is an object in the Sunfish namespace + # this routine ONLY renames the @Odata.id and "id" + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + logger.error(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + agentGiven_obj_path = redfish_obj['@odata.id'] + agentGiven_segments = agentGiven_obj_path.split("/") + agentGiven_obj_name = agentGiven_segments[-1] + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + # generate a new path and object name + logger.debug(f"renaming object: {agentGiven_obj_path}") + logger.debug(f"agent id: {owning_agent_id}") + sunfishGiven_obj_name = "Sunfish_"+owning_agent_id[:4]+"_"+agentGiven_obj_name + sunfishGiven_obj_path = "/" + for i in range(1,len(agentGiven_segments)-1): + sunfishGiven_obj_path=sunfishGiven_obj_path + agentGiven_segments[i]+"/" + sunfishGiven_obj_path=sunfishGiven_obj_path + sunfishGiven_obj_name + # need to check new name is also unused + if sunfishGiven_obj_path in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: + # new name was still not unique, just brute force it! 
+ temp_string = "Sunfish_"+owning_agent_id+"_"+agentGiven_obj_name + sunfishGiven_obj_path=sunfishGiven_obj_path.replace(sunfishGiven_obj_name,temp_string) + + # + logger.debug(sunfishGiven_obj_path) + redfish_obj['@odata.id'] = sunfishGiven_obj_path + if redfish_obj['Id'] == agentGiven_obj_name: + redfish_obj['Id'] = sunfishGiven_obj_name + # now need to update aliasDB + if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agentGiven_obj_path]=sunfishGiven_obj_path + else: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agentGiven_obj_path]=sunfishGiven_obj_path + + if sunfishGiven_obj_path not in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfishGiven_obj_path] = [] + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfishGiven_obj_path].append(agentGiven_obj_path) + else: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfishGiven_obj_path].append(agentGiven_obj_path) + + # now need to write aliasDB back to file + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + + return redfish_obj + + + def match_boundary_port(self, searching_agent_id, searching_port_URI, URI_aliasDB): + + matching_port_URIs = [] + # pull up the link partner dict for this agent.Port + searching_for = URI_aliasDB['Agents_xref_URIs'][searching_agent_id]\ + ['boundaryPorts'][searching_port_URI] + + if "RemoteLinkPartnerId" in searching_for: + searching_for_remote_partnerId =searching_for["RemoteLinkPartnerId"] + else: + searching_for_remote_partnerId = 'No remote partnerId' # do NOT use 'None' or "" + if "RemotePortId" in searching_for: + searching_for_remote_portId =searching_for["RemotePortId"] + else: + searching_for_remote_portId = 'No remote portId' # do NOT use 'None' or "" + if "LocalLinkPartnerId" in searching_for: + searching_for_local_partnerId =searching_for["LocalLinkPartnerId"] + else: + searching_for_local_partnerId = 'No local partnerId' # do NOT use 'None' or "" + if "LocalPortId" in searching_for: + searching_for_local_portId =searching_for["LocalPortId"] + else: + searching_for_local_portId = 'No local portId' # do NOT use 'None' or "" + + logger.info(f"searching for match to {searching_port_URI}") + for agent_id, agent_db in URI_aliasDB['Agents_xref_URIs'].items(): + if agent_id != searching_agent_id and 'boundaryPorts' in agent_db: + for port_URI, port_details in agent_db['boundaryPorts'].items(): + # always check if the remote port device ID is found first + if ("LocalLinkPartnerId" in port_details) and \ + (port_details["LocalLinkPartnerId"] == searching_for_remote_partnerId) and \ + ("LocalPortId" in port_details) and \ + (port_details["LocalPortId"] == searching_for_remote_portId): + matching_port_URIs.append(port_URI) + # cross reference BOTH agents' boundaryPorts + logger.info(f"----- found a matching port {port_URI}") + URI_aliasDB['Agents_xref_URIs'][agent_id]['boundaryPorts']\ + [port_URI]['PeerPortURI'] = searching_port_URI + URI_aliasDB['Agents_xref_URIs'][searching_agent_id]['boundaryPorts']\ + [searching_port_URI]['PeerPortURI'] = port_URI + # only check if the local port device ID is being waited on if first check fails + else: + if ("RemoteLinkPartnerId" in port_details) and \ + (port_details["RemoteLinkPartnerId"] == searching_for_local_partnerId) and \ + 
("RemotePortId" in port_details) and \ + (port_details["RemotePortId"] == searching_for_local_portId): + matching_port_URIs.append(port_URI) + # cross reference BOTH agent's boundaryPorts + logger.info(f"----- found a matching port {port_URI}") + URI_aliasDB['Agents_xref_URIs'][agent_id]['boundaryPorts']\ + [port_URI]['PeerPortURI'] = searching_port_URI + URI_aliasDB['Agents_xref_URIs'][searching_agent_id]['boundaryPorts']\ + [searching_port_URI]['PeerPortURI'] = port_URI + + + logger.debug(f"matching_ports {matching_port_URIs}") + return matching_port_URIs + + + + def track_boundary_port(self, redfish_obj, aggregation_source): + + agent_alias_dict = { + "aliases":{}, + "boundaryPorts":{} + } + + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + logger.error(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + logger.info(f"---- now processing a boundary port") + obj_type = redfish_obj["@odata.type"].split(".")[0] + obj_type = obj_type.replace("#","") + save_alias_file = False + port_protocol = redfish_obj["PortProtocol"] + port_type = redfish_obj["PortType"] + port_bc_flag = redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] + if obj_type == "Port" and port_bc_flag == "BoundaryPort": + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + localPortURI = redfish_obj['@odata.id'] + if port_protocol=="CXL" and (port_type == "InterswitchPort" or \ + port_type== "UpstreamPort" or port_type== "DownstreamPort"): + # create a boundPort entry in uri_aliasDB + if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = agent_alias_dict + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]={} + elif "boundaryPorts" not in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI] = {} + + # log what the fabric port reports its own PortId and its own LinkPartnerId + if "CXL" in redfish_obj and "LinkPartnerTransmit" in redfish_obj["CXL"]: # rely on 'and' short circuiting + local_link_partner_id = redfish_obj["CXL"]["LinkPartnerTransmit"]["LinkPartnerId"] + local_port_id = redfish_obj["CXL"]["LinkPartnerTransmit"]["PortId"] + if localPortURI not in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["LocalLinkPartnerId"] = local_link_partner_id + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["LocalPortId"] = local_port_id + + # log if the fabric port reports its received LinkPartnerInfo from other end + if "CXL" in redfish_obj and "LinkPartnerReceive" in redfish_obj["CXL"]: # rely on 'and' short circuiting + remote_link_partner_id = redfish_obj["CXL"]["LinkPartnerReceive"]["LinkPartnerId"] + remote_port_id = redfish_obj["CXL"]["LinkPartnerReceive"]["PortId"] + logger.debug(f"---- obj link_partner_id {remote_link_partner_id}") + if localPortURI not in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"]: + 
uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["RemoteLinkPartnerId"] =remote_link_partner_id + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["RemotePortId"] = remote_port_id + + # now need to write aliasDB back to file + save_alias_file = True + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + else: + logger.debug(f"---- CXL BoundaryPort found, but not InterswitchPort, UpstreamPort, or DownstreamPort") + pass + matching_ports = RedfishEventHandler.match_boundary_port(self, owning_agent_id, localPortURI, uri_aliasDB) + if matching_ports or save_alias_file: + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + logger.debug(f"----- boundary ports matched {matching_ports}") + return + + def add_aggregation_source_reference(redfish_obj, aggregation_source): + # BoundaryComponent = ["owned", "foreign", "BoundaryLink","unknown"] oem = { "@odata.type": "#SunfishExtensions.v1_0_0.ResourceExtensions", "ManagingAgent": { "@odata.id": aggregation_source["@odata.id"] - } + }, + "BoundaryComponent": "owned" } if "Oem" not in redfish_obj: redfish_obj["Oem"] = {"Sunfish_RM": oem} @@ -367,6 +1258,11 @@ def add_aggregation_source_reference(redfish_obj, aggregation_source): logger.warning(f"""The object {redfish_obj["@odata.id"]} returned while registering agent {aggregation_source["@odata.id"]} contains already a managing agent ({redfish_obj['Oem']['Sunfish_RM']['ManagingAgent']['@odata.id']}) and this should not be happening""") + # the expected case is there is no ManagingAgent before this event handler creates the object, for now even if the Agent has + # set this value, we will over write. redfish_obj["Oem"]["Sunfish_RM"]["ManagingAgent"] = { "@odata.id": aggregation_source["@odata.id"] } + if "BoundaryComponent" not in redfish_obj["Oem"]["Sunfish_RM"]: + redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] = oem["BoundaryComponent"] + diff --git a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py index 50c5fd4..3dd21a1 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py @@ -1,8 +1,10 @@ # Copyright IBM Corp. 2024 +# Copyright Hewlett Packard Enterprise Development LP 2024 # This software is available to you under a BSD 3-Clause License. # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE import json +import os import logging import string import requests @@ -35,21 +37,34 @@ def get_id(self) -> string: def is_agent_managed(cls, sunfish_core: 'sunfish.lib.core.Core', path: string): # if this is a top level resource, there's no need to check for the agent as no agent can own top level ones. # Example of top levels is Systems, Chassis, etc... 
- level = len(path.replace(sunfish_core.conf["redfish_root"], "").split("/")) + path_to_owner = (path.replace(sunfish_core.conf["redfish_root"], "").split("/")) + level = len(path_to_owner) if level == 1: return None + # path passed in is to parent object, which is usually a collection collection = sunfish_core.storage_backend.read(path) + if 'Collection' in collection["@odata.type"]: + print(f"parent obj {path} is a Collection.") + new_path = os.path.join('/'.join(path_to_owner[:-1])) + print(f"grandparent obj at {new_path}") + collection = sunfish_core.storage_backend.read(new_path) + logger.debug(f"Checking if the object {new_path} is managed by an Agent") + if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection["Oem"]["Sunfish_RM"]: + agent_path = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] + return Agent(sunfish_core, agent_path) + else: + # + logger.debug(f"Checking if the object {path} is managed by an Agent") + if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection["Oem"]["Sunfish_RM"]: + agent_path = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] + return Agent(sunfish_core, agent_path) - logger.debug(f"Checking if the object {path} is managed by an Agent") - if "Oem" in collection and "Sunfish_RM" in collection["Oem"]: - agent = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] - return Agent(sunfish_core, agent) return None def _forward_get_request(self, path: string) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource GET request {resource_uri}") try: @@ -67,7 +82,8 @@ def _forward_get_request(self, path: string) -> dict: raise e def _forward_create_request(self, path: string, payload: dict) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path + #resource_uri = agent_uri+ "/" + path logger.debug(f"Forwarding resource CREATE request {resource_uri}") try: @@ -85,7 +101,7 @@ def _forward_create_request(self, path: string, payload: dict) -> dict: raise e def _forward_delete_request(self, path: string) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource DELETE request {resource_uri}") try: @@ -101,7 +117,7 @@ def _forward_delete_request(self, path: string) -> dict: raise e def _forward_patch_request(self, path: string, payload: dict) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource PATCH request {resource_uri}") try: @@ -121,7 +137,7 @@ def _forward_patch_request(self, path: string, payload: dict) -> dict: raise e def _forward_replace_request(self, path: string, payload: dict) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource REPLACE request {resource_uri}") try: @@ -158,6 +174,8 @@ def forward_request(self, request: SunfishRequestType, path: string, payload: di if payload is None: logger.error("CREATE request payload missing") raise AgentForwardingFailure("CREATE", -1, "Missing payload") + #return self._forward_create_request(path, payload) + #return 
agents_management._forward_create_request(path, payload) return self._forward_create_request(path, payload) elif request == SunfishRequestType.DELETE: return self._forward_delete_request(path) diff --git a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py index a0ae67f..3d86441 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py @@ -4,6 +4,9 @@ import logging import string from typing import Optional +import pdb +import os +import json import sunfish.lib.core from sunfish_plugins.objects_managers.sunfish_agent.agents_management import Agent @@ -20,8 +23,13 @@ def __init__(self, core: 'sunfish.lib.core.Core'): self.core = core def forward_to_manager(self, request_type: 'sunfish.models.types.SunfishRequestType', path: string, payload: dict = None) -> Optional[dict]: + uri_aliasDB = {} agent_response = None + obj_modified = False path_to_check = path + print(f"!!obj path to forward is {path}") + print(f"!!request_type is {request_type}") + #pdb.set_trace() if request_type == SunfishRequestType.CREATE: # When creating an object, the request must be done on the collection. Since collections are generally not # marked with the managing agent we check whether the parent of the collection, that must be a single entity # get the parent path logger.debug(f"Checking managing agent for path: {path_to_check}") agent = Agent.is_agent_managed(self.core, path_to_check) + print(f"managing agent is {agent}") if agent: logger.debug(f"{path} is managed by an agent, forwarding the request") + obj_modified = self.xlateToAgentURIs(payload) + # extract the restored (agent-local) name from the payload + restored_path = payload["@odata.id"] try: - agent_response = agent.forward_request(request_type, path, payload=payload) + agent_response = agent.forward_request(request_type, restored_path, payload=payload) except AgentForwardingFailure as e: raise e @@ -71,7 +83,178 @@ def forward_to_manager(self, request_type: 'sunfish.models.types.SunfishRequestT agent_response["Oem"]["Sunfish_RM"]["ManagingAgent"] = { "@odata.id": agent.get_id() } + # anything un-aliased for the agent has to be undone + # anything added by the agent may need to be translated + obj_modified = self.xlateToSunfishURIs(agent_response) else: logger.debug(f"{path} is not managed by an agent") + return agent_response + + def xlateToAgentURIs(self, sunfish_obj): + + def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): + nestedPaths = [] + if type(obj) == list: + i = 0 + for entry in obj: + if type(entry) == list or type(entry) == dict: + nestedPaths.extend( findNestedURIs(self, URI_to_match, URI_to_sub, entry, path_to_nested_URI+"["+str(i)+"]")) + else: + i=i+1 + if type(obj) == dict: + for key,value in obj.items(): + if key == '@odata.id' and path_to_nested_URI != "": + # check @odata.id: value for an alias + if value == URI_to_match: + print(f"---- modifying {value} to {URI_to_sub}") + obj[key] = URI_to_sub + nestedPaths.append(path_to_nested_URI) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + nestedPaths.extend(findNestedURIs(self, URI_to_match, URI_to_sub, value, path_to_nested_URI+"["+key+"]" )) + return nestedPaths + + + try: + uri_alias_file = os.path.join(os.getcwd(), self.core.conf["backend_conf"]["fs_private"], 'URI_aliases.json')
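Two standalone sketches to aid reading the alias handling that follows. First, the shape of URI_aliases.json as inferred from the lookups in this patch; this is illustrative, not an authoritative schema, and all IDs and URIs are invented. Second, a simplified version of the nested @odata.id rewrite that findNestedURIs performs; the helper name rewrite_odata_ids is hypothetical.

# --- Inferred shape of URI_aliases.json (illustrative only) ---
uri_aliasDB_example = {
    "Sunfish_xref_URIs": {
        "aliases": {
            # Sunfish URI -> [agent-local URI, ...]; xlateToAgentURIs uses index [0]
            "/redfish/v1/Fabrics/CXL/Switches/sw1": ["/redfish/v1/Fabrics/CXL/Switches/agent-sw1"]
        }
    },
    "Agents_xref_URIs": {
        "<aggregation_source_id>": {
            "aliases": {
                # agent-local URI -> Sunfish URI (plain string)
                "/redfish/v1/Fabrics/CXL/Switches/agent-sw1": "/redfish/v1/Fabrics/CXL/Switches/sw1"
            },
            "boundaryPorts": {
                "/redfish/v1/Fabrics/CXL/Switches/agent-sw1/Ports/1": {
                    "RemoteLinkPartnerId": "<remote_link_partner_id>",
                    "RemotePortId": "<remote_port_id>"
                }
            }
        }
    }
}

# --- Simplified equivalent of the nested @odata.id rewrite (hypothetical helper) ---
def rewrite_odata_ids(obj, uri_to_match, uri_to_sub, at_root=True):
    # Replace every nested "@odata.id" equal to uri_to_match, leaving the object's
    # own top-level "@odata.id" and the Oem "Sunfish_RM" subtree untouched.
    if isinstance(obj, list):
        for entry in obj:
            rewrite_odata_ids(entry, uri_to_match, uri_to_sub, at_root=False)
    elif isinstance(obj, dict):
        for key, value in obj.items():
            if key == "@odata.id" and not at_root and value == uri_to_match:
                obj[key] = uri_to_sub
            elif key != "Sunfish_RM" and isinstance(value, (list, dict)):
                rewrite_odata_ids(value, uri_to_match, uri_to_sub, at_root=False)

payload = {"@odata.id": "/redfish/v1/Systems/sys1",
           "Links": {"Chassis": [{"@odata.id": "/redfish/v1/Chassis/cha1"}]}}
rewrite_odata_ids(payload, "/redfish/v1/Chassis/cha1", "/redfish/v1/Chassis/agent-cha1")
# payload["Links"]["Chassis"][0]["@odata.id"] now holds the agent-local URI.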
+ if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + print(f"alias file {uri_alias_file} not found") + raise Exception(f"alias file {uri_alias_file} not found") + + except: + raise Exception("could not load URI alias file") + + + try: + sunfish_aliases = uri_aliasDB["Sunfish_xref_URIs"]["aliases"] + #pdb.set_trace() + object_URI = sunfish_obj["@odata.id"] + aliasedNestedPaths=[] + obj_modified = False + # check the obj ID and initial @odata.id + if object_URI in sunfish_aliases: + sunfish_obj["@odata.id"] = sunfish_aliases[object_URI][0] + obj_modified = True + if sunfish_obj["Id"] == object_URI.split("/")[-1]: + sunfish_obj["Id"] = sunfish_aliases[object_URI][0].split("/")[-1] + # now find the nested @odata.id URIs and check them + for sunfish_URI, agent_URI in sunfish_aliases.items(): + # find all the references to the aliased sunfish_URI and replace them + path_to_nested_URI="" + # TODO: agent_URI is a list, not a simple text string, hence the [0] index below + aliasedNestedPaths= findNestedURIs(self, sunfish_URI, agent_URI[0], sunfish_obj, path_to_nested_URI ) + if aliasedNestedPaths: + obj_modified = True + for path in aliasedNestedPaths: + print(f"---- replaced {sunfish_URI} with {agent_URI} at {path}") + print(f"---- aliasedNestedPaths is {aliasedNestedPaths}") + if obj_modified: + logger.debug(f"---- object modified") + print(f"---- final updated object") + print(json.dumps(sunfish_obj, indent=2)) + pass + + if "Oem" in sunfish_obj and "Sunfish_RM" in sunfish_obj["Oem"] and \ + "BoundaryComponent" in sunfish_obj["Oem"]["Sunfish_RM"]: + if sunfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + # need to check for boundary port redirected links + # TODO + print(f"------ checking for redirected boundary link") + pass + + except: + logger.error(f"could not update links in object {object_URI}") + + #return sunfish_obj + return obj_modified + + + def xlateToSunfishURIs(self, agent_obj): + + def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): + nestedPaths = [] + if type(obj) == list: + i = 0 + for entry in obj: + if type(entry) == list or type(entry) == dict: + nestedPaths.extend( findNestedURIs(self, URI_to_match, URI_to_sub, entry, path_to_nested_URI+"["+str(i)+"]")) + else: + i=i+1 + if type(obj) == dict: + for key,value in obj.items(): + if key == '@odata.id' and path_to_nested_URI != "": + # check @odata.id: value for an alias + if value == URI_to_match: + print(f"---- modifying {value} to {URI_to_sub}") + obj[key] = URI_to_sub + nestedPaths.append(path_to_nested_URI) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + nestedPaths.extend(findNestedURIs(self, URI_to_match, URI_to_sub, value, path_to_nested_URI+"["+key+"]" )) + return nestedPaths + + + try: + uri_alias_file = os.path.join(os.getcwd(), self.core.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + print(f"alias file {uri_alias_file} not found") + raise Exception(f"alias file {uri_alias_file} not found") + + except: + raise Exception("could not load URI alias file") + + + try: + #pdb.set_trace() + owning_agent_id = agent_obj["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"].split("/")[-1] + agent_aliases = uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] + object_URI = agent_obj["@odata.id"] + aliasedNestedPaths=[] + 
obj_modified = False + # check the obj ID and initial @odata.id + if object_URI in agent_aliases: + agent_obj["@odata.id"] = agent_aliases[object_URI] + obj_modified = True + if agent_obj["Id"] == object_URI.split("/")[-1]: + agent_obj["Id"] = agent_aliases[object_URI].split("/")[-1] + # now find the nested @odata.id URIs and check them + for agent_URI,sunfish_URI in agent_aliases.items(): + # find all the references to the aliased sunfish_URI and replace it + path_to_nested_URI="" + # TODO agent_URI structure is a list, not a simple text string, v hence this index! + aliasedNestedPaths= findNestedURIs(self, agent_URI, sunfish_URI, agent_obj, path_to_nested_URI ) + if aliasedNestedPaths: + obj_modified = True + for path in aliasedNestedPaths: + print(f"---- replaced {agent_URI } with {sunfish_URI} at {path}") + if obj_modified: + logger.debug(f"---- object modified") + print(f"---- final updated object") + print(json.dumps(agent_obj, indent=2)) + pass + + if "Oem" in agent_obj and "Sunfish_RM" in agent_obj["Oem"] and \ + "BoundaryComponent" in agent_obj["Oem"]["Sunfish_RM"]: + if agent_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + # need to check for boundary port redirected links + # TODO + print(f"------ checking for redirected boundary link") + pass + + except: + logger.error(f"could not update links in object {object_URI}") + + #return sunfish_obj + return obj_modified + + diff --git a/sunfish_plugins/storage/file_system_backend/backend_FS.py b/sunfish_plugins/storage/file_system_backend/backend_FS.py index c9aed0b..998c076 100644 --- a/sunfish_plugins/storage/file_system_backend/backend_FS.py +++ b/sunfish_plugins/storage/file_system_backend/backend_FS.py @@ -2,6 +2,7 @@ # This software is available to you under a BSD 3-Clause License. # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE +import pdb import json import logging import os @@ -52,7 +53,7 @@ def write(self, payload: dict): Args: payload (json): json representing the resource that should be stored. - Raisexs: + Raises: CollectionNotSupported: the storage of the collections is not supported. AlreadyExists: it is not possible to have duplicate resources with the same ID. @@ -64,18 +65,23 @@ def write(self, payload: dict): # get ID and collection from payload length = len(self.redfish_root) id = payload['@odata.id'][length:] # id without redfish.root (es. 
/redfish/v1/) + parent_is_collection = True # default assumption + print(f"BackendFS.write called on {id}") id = id.split('/') for index in range(2, len(id[1:])): to_check = os.path.join('/'.join(id[:index]), 'index.json') to_check = os.path.join(os.getcwd(), self.root, to_check) + print(f"BackendFS.write(): path to check: {to_check}") if os.path.exists(to_check) is False: + print("path does not exist\n") raise ActionNotAllowed() - + ''' with open(to_check, 'r') as data_json: data = json.load(data_json) data_json.close() if 'Collection' in data["@odata.type"]: + print("path is to a Collection\n") members = data["Members"] for x in members: if x["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])): @@ -90,10 +96,13 @@ def write(self, payload: dict): present = True else: el["@odata.id"] = os.path.join(self.redfish_root, '/'.join(id[:index + 1])) + print(f"BackendFS.write of {el['@odata.id']}") with open(to_check, 'w') as data_json: json.dump(data, data_json, indent=4, sort_keys=True) data_json.close() + ''' + # we get here only if all grandparent objects exist last_element = len(id) - 1 collection_type = id[last_element - 1] resource_id = id[last_element] @@ -109,13 +118,18 @@ def write(self, payload: dict): collection_type) # collection_path .../Resources/[folder], collection_type = [folder] parent_path = os.path.dirname(collection_path) # parent path .../Resources + #pdb.set_trace() # check if the directory of the Collection already exists if not os.path.exists(collection_path): + # if parent directory doesn't exist, we assume it is a collection and create the collection + print(f"backendFS.write: making collection path directory") os.makedirs(collection_path) + # the following line assumes the path element name dictates the collection type + # it is more proper to examine the @odata.type property of the object being created! config = utils.generate_collection(collection_type) - # if the item to be written is managed by an agent, we want the collection containing it to also be maked + # if the item to be written is managed by an agent, we want the collection containing it to also be marked # accordingly. We do this only for collections to be created because we assume that if the collection is # there already: # a. 
The collection is a first level one that is managed by Sunfish @@ -140,13 +154,26 @@ def write(self, payload: dict): else: # checks if there is already a resource with the same id index_path = os.path.join(collection_path, "index.json") - if utils.check_unique_id(index_path, payload['@odata.id']) is False: - raise AlreadyExists(payload['@odata.id']) + with open(index_path, 'r') as data_json: + parent_data = json.load(data_json) + data_json.close() + if 'Collection' in parent_data["@odata.type"]: + print("parent path is to a Collection\n") + if utils.check_unique_id(index_path, payload['@odata.id']) is False: + raise AlreadyExists(payload['@odata.id']) + pass + else: + print("path is to an object\n") + parent_is_collection = False # + pass + + # creates folder of the element and write index.json (assuming that the payload is valid i dont use any kind of template to write index.json) - folder_id_path = os.path.join(collection_path, resource_id) # .../Resources/[folder]/[id] + folder_id_path = os.path.join(collection_path, resource_id) # .../Resources/[folder]/[resource_id] - # creates the folder of the element + # if folder does not exist, check the parent path + # not sure we need this next check given we do the same above if not os.path.exists(folder_id_path): os.mkdir(folder_id_path) parent_path = os.path.join(*folder_id_path.split("/")[:-2]) @@ -154,10 +181,11 @@ def write(self, payload: dict): root_path = os.path.join(os.getcwd(), self.root) if not os.path.exists(parent_json) and parent_path != root_path[1:]: logger.warning( - "You should not be here, this is crating an entire path where multiple folders are not existing") + "You should not be here, this is creating an entire path with multiple missing grandparents") + logger.info(f"backend_FS.write: writing {folder_id_path}/index.json") with open(os.path.join(folder_id_path, "index.json"), "w") as fd: fd.write(json.dumps(payload, indent=4, sort_keys=True)) fd.close() @@ -165,10 +193,12 @@ def write(self, payload: dict): json_collection_path = os.path.join(collection_path, 'index.json') # updates the collection with the new element created - if os.path.exists(json_collection_path): - utils.update_collections_json(path=json_collection_path, link=payload['@odata.id']) - else: - utils.generate_collection(collection_type) + if parent_is_collection: # need to insert new member into collection + if os.path.exists(json_collection_path): + utils.update_collections_json(path=json_collection_path, link=payload['@odata.id']) + else: + utils.generate_collection(collection_type) + pass # Events have to be handled in a different way. # To check if write() is called by an event subscription (EventDestination format) I check 'Destination' because @@ -341,3 +371,32 @@ def remove(self, path:str): to_replace = False return "DELETE: file removed." + + + + def reset_resources(self, resource_path: str, clean_resource_path: str): + ### + # this command ONLY applies to the File System storage backend + # The arguments are: + # - clean_resource_path: "" + # - resource_path: "" + # there is no protection on the receipt of this command + # This command will not work if the backend file system is not the host's filesystem! 
+ # + logger.info("reset_resources method called") + logger.info(f"fs root resource path is {resource_path}") + logger.info(f"clean_resource path is {clean_resource_path}") + try: + if os.path.exists(resource_path) and os.path.exists(clean_resource_path): + shutil.rmtree(resource_path) + shutil.copytree(clean_resource_path, resource_path) + logger.debug("reset_resources complete") + resp = "OK", 204 + else: + logger.debug("reset_resources: one or more paths do not exist.") + resp = "Fail", 500 + except Exception: + raise Exception("reset_resources Failed") + return resp +
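A hypothetical usage sketch for the new reset_resources call; the backend constructor arguments and both paths are placeholders. Note that this method relies on shutil, so backend_FS.py needs an import shutil if the module does not already have one (the hunk above only adds import pdb).

# Hypothetical usage; BackendFS construction and both paths are placeholders.
backend = BackendFS(conf)                      # assumed to be built elsewhere with a valid conf
status = backend.reset_resources(
    resource_path="Resources",                 # live tree served by the backend
    clean_resource_path="Resources.clean",     # pristine copy kept aside for resets
)
# On success the live tree is replaced by a copy of the clean tree and status is
# ("OK", 204); if either path is missing it returns ("Fail", 500), and any copy
# error is re-raised as an exception.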