From fadb46714f4d1f3f6fa31eb46de0042d6db9d6ba Mon Sep 17 00:00:00 2001
From: rherrell
Date: Wed, 10 Jul 2024 18:21:50 -0600
Subject: [PATCH 01/28] added the clear function to the events

---
 .../redfish/redfish_event_handler.py          | 44 +++++++++++++++++--
 .../storage/file_system_backend/backend_FS.py |  2 +-
 2 files changed, 41 insertions(+), 5 deletions(-)

diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py
index aedeafa..75b8735 100644
--- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py
+++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py
@@ -5,6 +5,9 @@
 import logging
 import os
 import warnings
+import shutil
+from uuid import uuid4
+
 import requests
 
 from sunfish.events.event_handler_interface import EventHandlerInterface
@@ -12,6 +15,7 @@
 from sunfish.lib.exceptions import *
 
 logger = logging.getLogger("RedfishEventHandler")
+logging.basicConfig(level=logging.DEBUG)
 
 
 class RedfishEventHandlersTable:
@@ -21,8 +25,8 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event
         # Fabric Agents are modelled as AggregationSource objects (RedFish v2023.1 at the time of writing this comment)
         # Registration will happen with the OFMF receiving a and event with MessageId: AggregationSourceDiscovered
         # The arguments of the event message are:
-        #   - Arg1: "Redfish"
-        #   - Arg2: "agent_ip:port"
+        #   - Arg0: "Redfish"
+        #   - Arg1: "agent_ip:port"
         # I am also assuming that the agent name to be used is contained in the OriginOfCondifiton field of the event as in the below example:
         # {
         #    "OriginOfCondition: [
@@ -98,10 +102,36 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont
 
         event_handler.core.storage_backend.patch(id, aggregation_source)
 
+    @classmethod
+    def ClearResources(cls, event_handler: EventHandlerInterface, event: dict, context: str):
+        ###
+        # Receipt of this event will cause the core library to remove the entire Resources tree, and reload a clean initial tree
+        # This will happen upon the Core receiving an event with MessageId: ClearResources
+        # The arguments of the event message are:
+        #   - Arg0: "
+        # there is no protection on the receipt of this event
+        # This event will not work if the backend file system is not the host's filesystem!
+        #
+        logger.info("ClearResources method called")
+        resource_path = event['MessageArgs'][0]  # relative Resource Path
+        logger.info(f"ClearResources path is {resource_path}")
+        try:
+            if os.path.exists('Resources'):
+                shutil.rmtree('Resources')
+
+            shutil.copytree(resource_path, 'Resources')
+            resp = 204
+        except Exception:
+            raise Exception("ClearResources Failed")
+            resp = 500
+        return resp
+
+
 class RedfishEventHandler(EventHandlerInterface):
     dispatch_table = {
         "AggregationSourceDiscovered": RedfishEventHandlersTable.AggregationSourceDiscovered,
-        "ResourceCreated": RedfishEventHandlersTable.ResourceCreated
+        "ResourceCreated": RedfishEventHandlersTable.ResourceCreated,
+        "ClearResources" : RedfishEventHandlersTable.ClearResources
     }
 
     def __init__(self, core):
@@ -339,11 +369,13 @@ def createInspectedObject(self,redfish_obj, aggregation_source):
             logger.debug("This is a collection")
 
 def add_aggregation_source_reference(redfish_obj, aggregation_source):
+    # BoundaryComponent = ["true", "false", "unknown"]
     oem = {
         "@odata.type": "#SunfishExtensions.v1_0_0.ResourceExtensions",
         "ManagingAgent": {
             "@odata.id": aggregation_source["@odata.id"]
-        }
+        },
+        "BoundaryComponent": "unknown"
     }
     if "Oem" not in redfish_obj:
         redfish_obj["Oem"] = {"Sunfish_RM": oem}
@@ -362,6 +394,10 @@ def add_aggregation_source_reference(redfish_obj, aggregation_source):
             logger.warning(f"""The object {redfish_obj["@odata.id"]} returned while registering agent
                            {aggregation_source["@odata.id"]} contains already a managing agent
                            ({redfish_obj['Oem']['Sunfish_RM']['ManagingAgent']['@odata.id']}) and this should not be happening""")
+    # the expected case is there is no ManagingAgent before this event handler creates the object, for now even if the Agent has
+    # set this value, we will over write.
     redfish_obj["Oem"]["Sunfish_RM"]["ManagingAgent"] = {
         "@odata.id": aggregation_source["@odata.id"]
     }
+    if "BoundaryComponent" not in redfish_obj["Oem"]["Sunfish_RM"]:
+        redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] = oem["BoundaryComponent"]
diff --git a/sunfish_plugins/storage/file_system_backend/backend_FS.py b/sunfish_plugins/storage/file_system_backend/backend_FS.py
index c9aed0b..99c91b4 100644
--- a/sunfish_plugins/storage/file_system_backend/backend_FS.py
+++ b/sunfish_plugins/storage/file_system_backend/backend_FS.py
@@ -115,7 +115,7 @@ def write(self, payload: dict):
 
             config = utils.generate_collection(collection_type)
 
-            # if the item to be written is managed by an agent, we want the collection containing it to also be maked
+            # if the item to be written is managed by an agent, we want the collection containing it to also be marked
             # accordingly. We do this only for collections to be created because we assume that if the collection is
             # there already:
             #    a. The collection is a first level one that is managed by Sunfish

From 1fc483d755bb6097137f713f38b8d2a126e6c1b8 Mon Sep 17 00:00:00 2001
From: rherrell
Date: Wed, 10 Jul 2024 18:23:24 -0600
Subject: [PATCH 02/28] added clearResources and TriggerEvents

---
 .../redfish/redfish_event_handler.py          | 82 ++++++++++++++++++-
 1 file changed, 78 insertions(+), 4 deletions(-)

diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py
index aedeafa..5be6941 100644
--- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py
+++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py
@@ -5,6 +5,9 @@
 import logging
 import os
 import warnings
+import shutil
+from uuid import uuid4
+
 import requests
 
 from sunfish.events.event_handler_interface import EventHandlerInterface
@@ -12,6 +15,7 @@
 from sunfish.lib.exceptions import *
 
 logger = logging.getLogger("RedfishEventHandler")
+logging.basicConfig(level=logging.DEBUG)
 
 
 class RedfishEventHandlersTable:
@@ -21,8 +25,8 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event
         # Fabric Agents are modelled as AggregationSource objects (RedFish v2023.1 at the time of writing this comment)
         # Registration will happen with the OFMF receiving a and event with MessageId: AggregationSourceDiscovered
         # The arguments of the event message are:
-        #   - Arg1: "Redfish"
-        #   - Arg2: "agent_ip:port"
+        #   - Arg0: "Redfish"
+        #   - Arg1: "agent_ip:port"
         # I am also assuming that the agent name to be used is contained in the OriginOfCondifiton field of the event as in the below example:
         # {
         #    "OriginOfCondition: [
@@ -98,10 +102,74 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont
 
         event_handler.core.storage_backend.patch(id, aggregation_source)
 
+    @classmethod
+    def ClearResources(cls, event_handler: EventHandlerInterface, event: dict, context: str):
+        ###
+        # Receipt of this event will cause the core library to remove the entire Resources tree, and reload a clean initial tree
+        # This will happen upon the Core receiving an event with MessageId: ClearResources
+        # The arguments of the event message are:
+        #   - Arg0: "
+        # there is no protection on the receipt of this event
+        # This event will not work if the backend file system is not the host's filesystem!
+        #
+        logger.info("ClearResources method called")
+        resource_path = event['MessageArgs'][0]  # relative Resource Path
+        logger.info(f"ClearResources path is {resource_path}")
+        try:
+            if os.path.exists('Resources'):
+                shutil.rmtree('Resources')
+
+            shutil.copytree(resource_path, 'Resources')
+            resp = 204
+        except Exception:
+            raise Exception("ClearResources Failed")
+            resp = 500
+        return resp
+
+
+    @classmethod
+    def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context: str):
+        ###
+        # Receipt of this event will cause the core library to retrieve and send a specific event to a specific target
+        # This will happen upon the API receiving an event with MessageId: TriggerEvent
+        # The arguments of the event message are:
+        #   - Arg0: "EventDescriptor" --relative OS Filesystem path from core library application home directory
+        #   - Arg1: "target_IP:port"
+        # there is no protection on the inadvertant receipt of this event
+        #
+        logger.info("TriggerEvent method called")
+        file_to_send = event['MessageArgs'][0]  # relative Resource Path
+        hostname = event['MessageArgs'][1]  # Agent address
+        initiator = event['OriginOfCondition']['@odata.id']
+        logger.info(f"file_to_send path is {file_to_send}")
+        try:
+            if os.path.exists('file_to_send'):
+                #shutil.rmtree('Resources')
+                print("found the event file")
+                # event_to_send = contents of file_to_send
+
+                # these lines are not yet correct!!
+                # send the event as a POST to the EventListener
+                #response = requests.post(f"{hostname}/EventListener",event_to_send)
+                #if response.status_code != 200:
+                #    raise Exception("Cannot find ConnectionMethod")
+                #response = response.json()
+
+            resp = 204
+        except Exception:
+            raise Exception("TriggerEvents Failed")
+            resp = 500
+        return resp
+
+
+
 class RedfishEventHandler(EventHandlerInterface):
     dispatch_table = {
         "AggregationSourceDiscovered": RedfishEventHandlersTable.AggregationSourceDiscovered,
-        "ResourceCreated": RedfishEventHandlersTable.ResourceCreated
+        "ResourceCreated": RedfishEventHandlersTable.ResourceCreated,
+        "TriggerEvent": RedfishEventHandlersTable.TriggerEvent,
+        "ClearResources" : RedfishEventHandlersTable.ClearResources
     }
 
     def __init__(self, core):
@@ -339,11 +407,13 @@ def createInspectedObject(self,redfish_obj, aggregation_source):
             logger.debug("This is a collection")
 
 def add_aggregation_source_reference(redfish_obj, aggregation_source):
+    # BoundaryComponent = ["true", "false", "unknown"]
     oem = {
         "@odata.type": "#SunfishExtensions.v1_0_0.ResourceExtensions",
         "ManagingAgent": {
             "@odata.id": aggregation_source["@odata.id"]
-        }
+        },
+        "BoundaryComponent": "unknown"
     }
     if "Oem" not in redfish_obj:
         redfish_obj["Oem"] = {"Sunfish_RM": oem}
@@ -362,6 +432,10 @@ def add_aggregation_source_reference(redfish_obj, aggregation_source):
             logger.warning(f"""The object {redfish_obj["@odata.id"]} returned while registering agent
                            {aggregation_source["@odata.id"]} contains already a managing agent
                            ({redfish_obj['Oem']['Sunfish_RM']['ManagingAgent']['@odata.id']}) and this should not be happening""")
+    # the expected case is there is no ManagingAgent before this event handler creates the object, for now even if the Agent has
+    # set this value, we will over write.
     redfish_obj["Oem"]["Sunfish_RM"]["ManagingAgent"] = {
         "@odata.id": aggregation_source["@odata.id"]
     }
+    if "BoundaryComponent" not in redfish_obj["Oem"]["Sunfish_RM"]:
+        redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] = oem["BoundaryComponent"]

From a5a968dcf6cde267eba80c39f7a493f680bfd8df Mon Sep 17 00:00:00 2001
From: rherrell
Date: Fri, 19 Jul 2024 12:03:49 -0600
Subject: [PATCH 03/28] installed reset_resources in file system backend_FS.py

---
 sunfish/storage/backend_interface.py          |  7 ++++-
 .../storage/file_system_backend/backend_FS.py | 29 +++++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/sunfish/storage/backend_interface.py b/sunfish/storage/backend_interface.py
index 7141952..765514a 100644
--- a/sunfish/storage/backend_interface.py
+++ b/sunfish/storage/backend_interface.py
@@ -1,4 +1,5 @@
 # Copyright IBM Corp. 2023
+# Copyright Hewlett Packard Enterprise 2024
 # This software is available to you under a BSD 3-Clause License.
 # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE
 
@@ -22,4 +23,8 @@ def patch():
 
     @abstractmethod
     def remove():
-        pass
\ No newline at end of file
+        pass
+
+    @abstractmethod
+    def reset_resources():
+        pass
diff --git a/sunfish_plugins/storage/file_system_backend/backend_FS.py b/sunfish_plugins/storage/file_system_backend/backend_FS.py
index 99c91b4..f0bf1a5 100644
--- a/sunfish_plugins/storage/file_system_backend/backend_FS.py
+++ b/sunfish_plugins/storage/file_system_backend/backend_FS.py
@@ -341,3 +341,32 @@ def remove(self, path:str):
                 to_replace = False
 
         return "DELETE: file removed."
+
+
+
+    def reset_resources(self, resource_path: str, clean_resource_path: str):
+        ###
+        # this command ONLY applies to the File System storage backend
+        # The arguments are:
+        #   - clean_resource_path: ""
+        #   - resource_path: ""
+        # there is no protection on the receipt of this command
+        # This command will not work if the backend file system is not the host's filesystem!
+        #
+        logger.info("reset_resources method called")
+        logger.info(f"fs root resource path is {resource_path}")
+        logger.info(f"clean_resource path is {clean_resource_path}")
+        try:
+            if os.path.exists(resource_path) and os.path.exists(clean_resource_path):
+                shutil.rmtree(resource_path)
+                shutil.copytree(clean_resource_path, resource_path)
+                logger.debug("reset_resources complete")
+                resp = "OK", 204
+            else:
+                logger.debug("reset_resources: one or more paths do not exist.")
+                pass
+        except Exception:
+            raise Exception("reset_resources Failed")
+            resp = "Fail", 500
+        return resp
+

From 039816a04cb75a1a1f0ee1cf579fcc0b885a2c89 Mon Sep 17 00:00:00 2001
From: rherrell
Date: Wed, 31 Jul 2024 11:47:04 -0600
Subject: [PATCH 04/28] added Signed code and copyright

Signed-off-by: rherrell
---
 sunfish/events/event_handler_interface.py               | 0
 .../objects_managers/sunfish_agent/agents_management.py | 3 ++-
 2 files changed, 2 insertions(+), 1 deletion(-)
 mode change 100644 => 100755 sunfish/events/event_handler_interface.py
 mode change 100644 => 100755 sunfish_plugins/objects_managers/sunfish_agent/agents_management.py

diff --git a/sunfish/events/event_handler_interface.py b/sunfish/events/event_handler_interface.py
old mode 100644
new mode 100755
diff --git a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py
old mode 100644
new mode 100755
index 50c5fd4..803f86a
--- a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py
+++ b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py
@@ -1,4 +1,5 @@
 # Copyright IBM Corp. 2024
+# Copyright Hewlett Packard Enterprise Development LP 2024
 # This software is available to you under a BSD 3-Clause License.
 # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE
 
@@ -42,7 +43,7 @@ def is_agent_managed(cls, sunfish_core: 'sunfish.lib.core.Core', path: string):
         collection = sunfish_core.storage_backend.read(path)
 
         logger.debug(f"Checking if the object {path} is managed by an Agent")
-        if "Oem" in collection and "Sunfish_RM" in collection["Oem"]:
+        if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection:
             agent = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"]
             return Agent(sunfish_core, agent)
 

From 2eebf524cc35242518b99f5bbd2fad4da05264af Mon Sep 17 00:00:00 2001
From: Christian Pinto
Date: Fri, 26 Jul 2024 14:01:27 +0100
Subject: [PATCH 05/28] Removed circular call where the core library calls itself instead of using plugins directly.
Signed-off-by: Christian Pinto Signed-off-by: Christian Pinto --- .../redfish/redfish_event_handler.py | 31 +++++++++++-------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 5a89292..d0800a0 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -5,6 +5,7 @@ import json import logging import os +import uuid import warnings import shutil from uuid import uuid4 @@ -45,13 +46,16 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event response = response.json() ### Save agent registration - connection_method_name = connectionMethodId.split('/')[-1] - connection_method_name = connectionMethodId[:-len(connection_method_name)] - event_handler.core.create_object(connection_method_name, response) + # connection_method_name = connectionMethodId.split('/')[-1] + # connection_method_name = connectionMethodId[:-len(connection_method_name)] + event_handler.core.storage_backend.write(response) - connection_method_template = { + aggregation_source_id = str(uuid.uuid4()) + aggregation_source_template = { "@odata.type": "#AggregationSource.v1_2_.AggregationSource", + "@odata.id": f"{event_handler.core.conf['redfish_root']}/AggregationService/AggregationSources/{aggregation_source_id}", "HostName": hostname, + "Id": aggregation_source_id, "Links": { "ConnectionMethod": { "@odata.id": connectionMethodId @@ -59,15 +63,11 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event "ResourcesAccessed": [] } } - try: - resp_post = event_handler.core.create_object( - os.path.join(event_handler.core.conf["redfish_root"], "AggregationService/AggregationSources"), - connection_method_template) + event_handler.core.storage_backend.write(aggregation_source_template) except Exception: raise Exception() - aggregation_source_id = resp_post['@odata.id'] agent_subscription_context = {"Context": aggregation_source_id.split('/')[-1]} resp_patch = requests.patch(f"{hostname}/redfish/v1/EventService/Subscriptions/SunfishServer", @@ -96,7 +96,13 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont add_aggregation_source_reference(response, aggregation_source) - event_handler.core.create_object(id, response) + # here we are assuming that we are getting a fully populated redfish + # object from the agent. + if "@odata.id" not in response: + logger.warning(f"Resource {id} did not have @odata.id set when retrieved from Agent. 
Initializing its value with {id}") + response["odata.id"] = id + + event_handler.core.storage_backend.write(response) RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) @@ -183,7 +189,6 @@ def __init__(self, core): self.redfish_root = core.conf["redfish_root"] self.fs_root = core.conf["backend_conf"]["fs_root"] self.subscribers_root = core.conf["backend_conf"]["subscribers_root"] - self.backend = core.storage_backend @classmethod def dispatch(cls, message_id: str, event_handler: EventHandlerInterface, event: dict, context: str): if message_id in cls.dispatch_table: @@ -246,7 +251,7 @@ def check_data_type(self, origin): resource = origin[length:] path = os.path.join(self.redfish_root, resource) try: - data = self.core.get_object(path) + data = self.core.storage_backend.read(path) except ResourceNotFound as e: raise ResourceNotFound(path) type = data["@odata.type"].split('.')[0] @@ -270,7 +275,7 @@ def forward_event(self, list, payload): for id in list: path = os.path.join(self.redfish_root, 'EventService', 'Subscriptions', id) try: - data = self.core.get_object(path) + data = self.core.storage_backend.read(path) # print('send to: ', data["Id"]) resp = requests.post(data['Destination'], json=payload) resp.raise_for_status() From b44998b6e86531062a480b602464587e79dddb98 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 09:14:48 +0000 Subject: [PATCH 06/28] Bump zipp from 3.17.0 to 3.19.1 Bumps [zipp](https://github.com/jaraco/zipp) from 3.17.0 to 3.19.1. - [Release notes](https://github.com/jaraco/zipp/releases) - [Changelog](https://github.com/jaraco/zipp/blob/main/NEWS.rst) - [Commits](https://github.com/jaraco/zipp/compare/v3.17.0...v3.19.1) --- updated-dependencies: - dependency-name: zipp dependency-type: indirect ... Signed-off-by: dependabot[bot] --- poetry.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index d562d53..4827465 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "blinker" @@ -194,18 +194,18 @@ watchdog = ["watchdog (>=2.3)"] [[package]] name = "zipp" -version = "3.17.0" +version = "3.19.1" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, - {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, + {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, + {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [metadata] lock-version = "2.0" From 39926a8a3c68532f7fe9dee8ccb49502ceb85a28 Mon Sep 17 00:00:00 2001 From: rherrell Date: Thu, 15 Aug 2024 16:16:02 -0600 Subject: [PATCH 07/28] removed un-used ClearResources event Signed-off-by: rherrell --- sunfish/events/event_handler_interface.py | 0 sunfish/storage/backend_interface.py | 2 +- .../redfish/redfish_event_handler.py | 29 +------------------ .../sunfish_agent/agents_management.py | 0 4 files changed, 2 insertions(+), 29 deletions(-) mode change 100755 => 100644 sunfish/events/event_handler_interface.py mode change 100755 => 100644 sunfish_plugins/objects_managers/sunfish_agent/agents_management.py diff --git a/sunfish/events/event_handler_interface.py b/sunfish/events/event_handler_interface.py old mode 100755 new mode 100644 diff --git a/sunfish/storage/backend_interface.py b/sunfish/storage/backend_interface.py index 765514a..b1ee9c3 100644 --- a/sunfish/storage/backend_interface.py +++ b/sunfish/storage/backend_interface.py @@ -1,5 +1,5 @@ # Copyright IBM Corp. 2023 -# Copyright Hewlett Packard Enterprise 2024 +# Copyright Hewlett Packard Enterprise Development LP 2024 # This software is available to you under a BSD 3-Clause License. 
# The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index d0800a0..cf47d4d 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -108,32 +108,6 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont event_handler.core.storage_backend.patch(id, aggregation_source) - - @classmethod - def ClearResources(cls, event_handler: EventHandlerInterface, event: dict, context: str): - ### - # Receipt of this event will cause the core library to remove the entire Resources tree, and reload a clean initial tree - # This will happen upon the Core receiving an event with MessageId: ClearResources - # The arguments of the event message are: - # - Arg0: " - # there is no protection on the receipt of this event - # This event will not work if the backend file system is not the host's filesystem! - # - logger.info("ClearResources method called") - resource_path = event['MessageArgs'][0] # relative Resource Path - logger.info(f"ClearResources path is {resource_path}") - try: - if os.path.exists('Resources'): - shutil.rmtree('Resources') - - shutil.copytree(resource_path, 'Resources') - resp = 204 - except Exception: - raise Exception("ClearResources Failed") - resp = 500 - return resp - - @classmethod def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context: str): ### @@ -175,8 +149,7 @@ class RedfishEventHandler(EventHandlerInterface): dispatch_table = { "AggregationSourceDiscovered": RedfishEventHandlersTable.AggregationSourceDiscovered, "ResourceCreated": RedfishEventHandlersTable.ResourceCreated, - "TriggerEvent": RedfishEventHandlersTable.TriggerEvent, - "ClearResources" : RedfishEventHandlersTable.ClearResources + "TriggerEvent": RedfishEventHandlersTable.TriggerEvent } def __init__(self, core): diff --git a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py old mode 100755 new mode 100644 From 1196a30aad4543ae17cf2ca30b19c806ab856f84 Mon Sep 17 00:00:00 2001 From: rherrell Date: Mon, 19 Aug 2024 13:59:25 -0600 Subject: [PATCH 08/28] ready to rebase with main Signed-off-by: rherrell --- .../events_handlers/redfish/redfish_event_handler.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index cf47d4d..8c1c25d 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -95,7 +95,6 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont response = response.json() add_aggregation_source_reference(response, aggregation_source) - # here we are assuming that we are getting a fully populated redfish # object from the agent. 
if "@odata.id" not in response: @@ -125,7 +124,6 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context logger.info(f"file_to_send path is {file_to_send}") try: if os.path.exists('file_to_send'): - #shutil.rmtree('Resources') print("found the event file") # event_to_send = contents of file_to_send From 89ad9655bdf4caeb60a92c4094e42d48a09244e2 Mon Sep 17 00:00:00 2001 From: rherrell Date: Mon, 19 Aug 2024 17:24:38 -0600 Subject: [PATCH 09/28] Revert "Removed circular call where the core library calls itself instead of using plugins directly." This reverts commit 2eebf524cc35242518b99f5bbd2fad4da05264af. --- .../redfish/redfish_event_handler.py | 27 ++++++++++--------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 8c1c25d..a5c09d6 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -5,7 +5,6 @@ import json import logging import os -import uuid import warnings import shutil from uuid import uuid4 @@ -46,16 +45,13 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event response = response.json() ### Save agent registration - # connection_method_name = connectionMethodId.split('/')[-1] - # connection_method_name = connectionMethodId[:-len(connection_method_name)] - event_handler.core.storage_backend.write(response) + connection_method_name = connectionMethodId.split('/')[-1] + connection_method_name = connectionMethodId[:-len(connection_method_name)] + event_handler.core.create_object(connection_method_name, response) - aggregation_source_id = str(uuid.uuid4()) - aggregation_source_template = { + connection_method_template = { "@odata.type": "#AggregationSource.v1_2_.AggregationSource", - "@odata.id": f"{event_handler.core.conf['redfish_root']}/AggregationService/AggregationSources/{aggregation_source_id}", "HostName": hostname, - "Id": aggregation_source_id, "Links": { "ConnectionMethod": { "@odata.id": connectionMethodId @@ -63,11 +59,15 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event "ResourcesAccessed": [] } } + try: - event_handler.core.storage_backend.write(aggregation_source_template) + resp_post = event_handler.core.create_object( + os.path.join(event_handler.core.conf["redfish_root"], "AggregationService/AggregationSources"), + connection_method_template) except Exception: raise Exception() + aggregation_source_id = resp_post['@odata.id'] agent_subscription_context = {"Context": aggregation_source_id.split('/')[-1]} resp_patch = requests.patch(f"{hostname}/redfish/v1/EventService/Subscriptions/SunfishServer", @@ -101,7 +101,9 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont logger.warning(f"Resource {id} did not have @odata.id set when retrieved from Agent. 
Initializing its value with {id}") response["odata.id"] = id - event_handler.core.storage_backend.write(response) + #event_handler.core.storage_backend.write(response) + + event_handler.core.create_object(id, response) RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) @@ -160,6 +162,7 @@ def __init__(self, core): self.redfish_root = core.conf["redfish_root"] self.fs_root = core.conf["backend_conf"]["fs_root"] self.subscribers_root = core.conf["backend_conf"]["subscribers_root"] + self.backend = core.storage_backend @classmethod def dispatch(cls, message_id: str, event_handler: EventHandlerInterface, event: dict, context: str): if message_id in cls.dispatch_table: @@ -222,7 +225,7 @@ def check_data_type(self, origin): resource = origin[length:] path = os.path.join(self.redfish_root, resource) try: - data = self.core.storage_backend.read(path) + data = self.core.get_object(path) except ResourceNotFound as e: raise ResourceNotFound(path) type = data["@odata.type"].split('.')[0] @@ -246,7 +249,7 @@ def forward_event(self, list, payload): for id in list: path = os.path.join(self.redfish_root, 'EventService', 'Subscriptions', id) try: - data = self.core.storage_backend.read(path) + data = self.core.get_object(path) # print('send to: ', data["Id"]) resp = requests.post(data['Destination'], json=payload) resp.raise_for_status() From fb16e0d40daec017a9ea3ddb8ba34fa85848c55a Mon Sep 17 00:00:00 2001 From: rherrell Date: Fri, 23 Aug 2024 17:43:41 -0600 Subject: [PATCH 10/28] recursive fetch fixes WIP Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 35 ++-- .../sunfish_agent/agents_management.py | 2 +- .../storage/file_system_backend/backend_FS.py | 151 ++++++++++++++++++ 3 files changed, 176 insertions(+), 12 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index a5c09d6..40a306b 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -93,8 +93,10 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont if response.status_code != 200: raise Exception("Cannot find ConnectionMethod") response = response.json() + print(f"new resource is \n") + print(json.dumps(response, indent=4)) - add_aggregation_source_reference(response, aggregation_source) + # add_aggregation_source_reference(response, aggregation_source) # here we are assuming that we are getting a fully populated redfish # object from the agent. 
if "@odata.id" not in response: @@ -103,7 +105,8 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont #event_handler.core.storage_backend.write(response) - event_handler.core.create_object(id, response) + # shouldn't be writing the new object before 'inspecting it' + #event_handler.core.create_object(id, response) RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) @@ -290,12 +293,14 @@ def handleNestedObject(self, obj): if type(obj) == dict: for key,value in obj.items(): if key == '@odata.id': + print(f"found URL to Redfish obj {value}") RedfishEventHandler.handleEntryIfNotVisited(self, value, visited, queue) - elif type(value) == list or type(value) == dict: - handleNestedObject(self, value) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + handleNestedObject(self, value) # need to ignore Sunfish_RM paths; they are wrong namespace while queue: queue = sorted(queue) + print(f"sorted queue: \n{queue}") id = queue.pop(0) redfish_obj = RedfishEventHandler.fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched) @@ -304,13 +309,14 @@ def handleNestedObject(self, obj): continue for key, val in redfish_obj.items(): - if key == 'Links': - if type(val)==dict or type(val)==list: - handleNestedObject(self, val) if key == '@odata.id': RedfishEventHandler.handleEntryIfNotVisited(self, val, visited, queue) + print(f"found URL to Redfish obj {val}") pass - if type(val) == list or type(val) == dict: + #elif key == 'Links': + # if type(val)==dict or type(val)==list: + # handleNestedObject(self, val) + elif type(val) == list or type(val) == dict: handleNestedObject(self, val) return visited @@ -338,27 +344,31 @@ def handleEntryIfNotVisited(self,entry, visited, queue): def fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched): # if have no parent dirs path_nodes = id.split("/") need_parent_prefetch = False + print(f"fetchResourceAndTree path_nodes {path_nodes}") for node_position in range(4, len(path_nodes) - 1): redfish_path = f'/redfish/v1/{"/".join(path_nodes[3:node_position + 1])}' logger.info(f"Checking redfish path: {redfish_path}") - if redfish_path not in visited: + print(f"visit path {redfish_path} ?") + if redfish_path not in visited: # last path is always in visited queue! need_parent_prefetch = True logger.info(f"Inspect redfish path: {redfish_path}") + print(f"adding redfish path to queue: {redfish_path}") queue.append(redfish_path) visited.append(redfish_path) if need_parent_prefetch: # requeue queue.append(id) - else: + else: # all parent objects have been visited redfish_obj = RedfishEventHandler.fetchResource(self, id, aggregation_source) fetched.append(id) return redfish_obj def fetchResource(self, obj_id, aggregation_source): + # only called if all parent objects have been put in queue and thus already fetched from aggregation_source resource_endpoint = aggregation_source["HostName"] + obj_id logger.info(f"fetch: {resource_endpoint}") response = requests.get(resource_endpoint) - if response.status_code == 200: + if response.status_code == 200: # Agent must have returned this object redfish_obj = response.json() RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) @@ -375,6 +385,7 @@ def createInspectedObject(self,redfish_obj, aggregation_source): file_path = os.path.join(self.conf['redfish_root'], obj_path) if 'Collection' not in redfish_obj['@odata.type']: + # re-write this to explicitly check for object's existence in Sunfish! 
try: if self.get_object(file_path) == redfish_obj: pass @@ -382,6 +393,8 @@ def createInspectedObject(self,redfish_obj, aggregation_source): warnings.warn('Resource state changed') except ResourceNotFound: add_aggregation_source_reference(redfish_obj, aggregation_source) + # do we change the following to a simple FS write? + print(f"creating object: {file_path}") self.create_object(file_path, redfish_obj) else: logger.debug("This is a collection") diff --git a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py index 803f86a..1cb00d5 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py @@ -43,7 +43,7 @@ def is_agent_managed(cls, sunfish_core: 'sunfish.lib.core.Core', path: string): collection = sunfish_core.storage_backend.read(path) logger.debug(f"Checking if the object {path} is managed by an Agent") - if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection: + if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection["Oem"]["Sunfish_RM"]: agent = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] return Agent(sunfish_core, agent) diff --git a/sunfish_plugins/storage/file_system_backend/backend_FS.py b/sunfish_plugins/storage/file_system_backend/backend_FS.py index f0bf1a5..5e9dc52 100644 --- a/sunfish_plugins/storage/file_system_backend/backend_FS.py +++ b/sunfish_plugins/storage/file_system_backend/backend_FS.py @@ -45,6 +45,7 @@ def read(self, path: str) -> dict: except FileNotFoundError as e: raise ResourceNotFound(resource) + ''' def write(self, payload: dict): """Checks if the Collection exists for that resource and stores the resource in the correct position of the file system. It create the directory of the resource, creates the index.json file and updates the files linked with the new resource (Collection members or Resources list). @@ -65,17 +66,21 @@ def write(self, payload: dict): length = len(self.redfish_root) id = payload['@odata.id'][length:] # id without redfish.root (es. 
/redfish/v1/) + print(f"BackendFS.write called on {id}") id = id.split('/') for index in range(2, len(id[1:])): to_check = os.path.join('/'.join(id[:index]), 'index.json') to_check = os.path.join(os.getcwd(), self.root, to_check) + print(f"BackendFS.write(): path to check: {to_check}") if os.path.exists(to_check) is False: + print("path does not exist\n") raise ActionNotAllowed() with open(to_check, 'r') as data_json: data = json.load(data_json) data_json.close() if 'Collection' in data["@odata.type"]: + print("path is to a Collection\n") members = data["Members"] for x in members: if x["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])): @@ -90,6 +95,7 @@ def write(self, payload: dict): present = True else: el["@odata.id"] = os.path.join(self.redfish_root, '/'.join(id[:index + 1])) + print(f"BackendFS.write of {el['@odata.id']}") with open(to_check, 'w') as data_json: json.dump(data, data_json, indent=4, sort_keys=True) data_json.close() @@ -158,6 +164,151 @@ def write(self, payload: dict): + with open(os.path.join(folder_id_path, "index.json"), "w") as fd: + fd.write(json.dumps(payload, indent=4, sort_keys=True)) + fd.close() + + json_collection_path = os.path.join(collection_path, 'index.json') + + # updates the collection with the new element created + if os.path.exists(json_collection_path): + utils.update_collections_json(path=json_collection_path, link=payload['@odata.id']) + else: + utils.generate_collection(collection_type) + + # Events have to be handled in a different way. + # To check if write() is called by an event subscription (EventDestination format) I check 'Destination' because + # it is the only required required property that other objects doesnt have + + logging.info('BackendFS: [POST] success') + return payload + + ''' + def write(self, payload: dict): + """Checks if the Collection exists for that resource and stores the resource in the correct position of the file system. + It create the directory of the resource, creates the index.json file and updates the files linked with the new resource (Collection members or Resources list). + + Args: + payload (json): json representing the resource that should be stored. + + Raises: + CollectionNotSupported: the storage of the collections is not supported. + AlreadyExists: it is not possible to have duplicate resources with the same ID. + + Returns: + json: stored data + """ + logging.info('new_BackendFS write called') + + # get ID and collection from payload + length = len(self.redfish_root) + id = payload['@odata.id'][length:] # id without redfish.root (es. 
/redfish/v1/) + + print(f"BackendFS.write called on {id}") + id = id.split('/') + print(f"BackendFS.write split id == {id}") + for index in range(2, len(id[1:])): + to_check = os.path.join('/'.join(id[:index]), 'index.json') + to_check = os.path.join(os.getcwd(), self.root, to_check) + print(f"BackendFS.write(): path to check: {to_check}") + if os.path.exists(to_check) is False: + print("path to object directory (parent object) does not exist\n") + raise ActionNotAllowed() + + with open(to_check, 'r') as data_json: + data = json.load(data_json) + data_json.close() + if 'Collection' in data["@odata.type"]: + print("path is to a Collection\n") + members = data["Members"] + for x in members: + if x["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])): + present = True + else: + print("path is to assumed list object") + if data[id[index]]: + element = data[id[index]] + print(f"element {id} found is {element}") + if type(element) is not list: + continue + for el in element: + if el["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])): + present = True + else: + el["@odata.id"] = os.path.join(self.redfish_root, '/'.join(id[:index + 1])) + print(f"BackendFS.write of {el['@odata.id']}") + with open(to_check, 'w') as data_json: + json.dump(data, data_json, indent=4, sort_keys=True) + data_json.close() + else: + print("no element found???") + + last_element = len(id) - 1 + collection_type = id[last_element - 1] + resource_id = id[last_element] + full_collection = '' + # create the path of the full collection if it is a subcollection + if len(id) > 2: + for i in range(0, last_element - 1): + full_collection = full_collection + id[i] + '/' + + collection_type = os.path.join(full_collection, collection_type) + + collection_path = os.path.join(os.getcwd(), self.root, + collection_type) # collection_path .../Resources/[folder], collection_type = [folder] + print(f"backendFS.write: collection_path is {collection_path}") + parent_path = os.path.dirname(collection_path) # parent path .../Resources + + # check if the directory of the Collection already exists + if not os.path.exists(collection_path): + print(f"makinge collection path") + os.makedirs(collection_path) + + config = utils.generate_collection(collection_type) + + # if the item to be written is managed by an agent, we want the collection containing it to also be marked + # accordingly. We do this only for collections to be created because we assume that if the collection is + # there already: + # a. The collection is a first level one that is managed by Sunfish + # b. The collection was previously created during an agent discovery process and therefore already marked + # if "Oem" in payload and "Sunfish_RM" in payload["Oem"] and len(id) > 2 : + # if "Oem" not in config: + # config["Oem"] = {} + # config["Oem"]["Sunfish_RM"] = payload["Oem"]["Sunfish_RM"] + + ## write file Resources/[folder]/index.json + with open(os.path.join(collection_path, "index.json"), "w") as fd: + fd.write(json.dumps(config, indent=4, sort_keys=True)) + fd.close() + + # check if the index.json representing the collection exists. 
In case it doesnt it will create index.json with the collection template + if os.path.exists(os.path.join(parent_path, "index.json")): + collection_name = collection_type.split('/')[-1] + utils.update_collections_parent_json(path=os.path.join(parent_path, "index.json"), type=collection_name, + link=self.redfish_root + collection_type) + else: + utils.generate_collection(collection_type) + else: + # checks if there is already a resource with the same id + index_path = os.path.join(collection_path, "index.json") + if utils.check_unique_id(index_path, payload['@odata.id']) is False: + raise AlreadyExists(payload['@odata.id']) + + # creates folder of the element and write index.json (assuming that the payload is valid i dont use any kind of template to write index.json) + folder_id_path = os.path.join(collection_path, resource_id) # .../Resources/[folder]/[id] + + # creates the folder of the element + if not os.path.exists(folder_id_path): + os.mkdir(folder_id_path) + parent_path = os.path.join(*folder_id_path.split("/")[:-2]) + parent_json = "/" + os.path.join(parent_path, "index.json") + root_path = os.path.join(os.getcwd(), self.root) + if not os.path.exists(parent_json) and parent_path != root_path[1:]: + logger.warning( + "You should not be here, this is crating an entire path where multiple folders are not existing") + + + with open(os.path.join(folder_id_path, "index.json"), "w") as fd: fd.write(json.dumps(payload, indent=4, sort_keys=True)) fd.close() From 91200da65e6c645cd1a3dd21ac47bb982f4db86a Mon Sep 17 00:00:00 2001 From: rherrell Date: Fri, 6 Sep 2024 19:25:40 -0600 Subject: [PATCH 11/28] fixed incorrect assumption about posting only to Collections Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 101 ++++++++++++++++-- .../storage/file_system_backend/backend_FS.py | 85 +++++++++++---- 2 files changed, 153 insertions(+), 33 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 40a306b..c13beb2 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -77,21 +77,24 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event @classmethod def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, context: str): + # incoming context (an aggregation_source ID) comes from event sender if context == "": raise PropertyNotFound("Missing agent context in ResourceCreated event") logger.info("New resource created") - id = event['OriginOfCondition']['@odata.id'] # /redfish/v1/Fabrics/CXL + id = event['OriginOfCondition']['@odata.id'] # ex: /redfish/v1/Fabrics/CXL + logger.info(f"aggregation_source's redfish URI: {id}") + # must have an aggregation_source object to assign as owner of new resource aggregation_source = event_handler.core.storage_backend.read( os.path.join(event_handler.core.conf["redfish_root"], "AggregationService", "AggregationSources", context) ) hostname = aggregation_source["HostName"] - response = requests.get(f"{hostname}/{id}") + if response.status_code != 200: - raise Exception("Cannot find ConnectionMethod") + raise Exception("Cannot find new resource at aggregation_source") response = response.json() print(f"new resource is \n") print(json.dumps(response, indent=4)) @@ -103,9 +106,8 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont logger.warning(f"Resource {id} did not have 
@odata.id set when retrieved from Agent. Initializing its value with {id}") response["odata.id"] = id - #event_handler.core.storage_backend.write(response) - # shouldn't be writing the new object before 'inspecting it' + #event_handler.core.storage_backend.write(response) #event_handler.core.create_object(id, response) RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) @@ -282,6 +284,8 @@ def bfsInspection(self, node, aggregation_source): queue = [] visited = [] fetched = [] + notfound = [] + uploaded = [] visited.append(node['@odata.id']) queue.append(node['@odata.id']) @@ -304,6 +308,8 @@ def handleNestedObject(self, obj): id = queue.pop(0) redfish_obj = RedfishEventHandler.fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched) + if redfish_obj is None: # we failed to locate it in aggregation_source + notfound.append(id) if redfish_obj is None or type(redfish_obj) != dict: logger.info(f"Resource - {id} - not available") continue @@ -316,9 +322,58 @@ def handleNestedObject(self, obj): #elif key == 'Links': # if type(val)==dict or type(val)==list: # handleNestedObject(self, val) + # + # keep extracting nested @odata.id references from the currently fetched object elif type(val) == list or type(val) == dict: handleNestedObject(self, val) - return visited + print("\n\nattempted to fetch the following URIs:\n") + print(json.dumps(sorted(fetched),indent = 4)) + print("\n\nAgent did not return objects for the following URIs:\n") + print(json.dumps(sorted(notfound),indent = 4)) + return visited #why not the 'fetched' list? + + def create_uploaded_object(self, path: str, payload: dict): + # before to add the ID and to call the methods there should be the json validation + + # generate unique uuid if is not present + if '@odata.id' not in payload and 'Id' not in payload: + pass + #id = str(uuid.uuid4()) + #to_add = { + #'Id': id, + #'@odata.id': os.path.join(path, id) + #} + #payload.update(to_add) + raise exception(f"create_uploaded_object: no Redfish ID (@odata.id) found") + + #object_type = self._get_type(payload) + # we assume agents can upload collections, just not the root level collections + # we will check for uploaded collections later + #if "Collection" in object_type: + #raise CollectionNotSupported() + + payload_to_write = payload + + try: + # 1. check the path target of the operation exists + # self.storage_backend.read(path) + # 2. we don't check the manager; we assume uploading agent is the manager unless it says otherwise + #agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.CREATE, path, payload=payload) + #if agent_response: + #payload_to_write = agent_response + # 3. should be no custom handler, this is not a POST, we upload the objects directly into the Redfish database + #self.objects_handler.dispatch(object_type, path, SunfishRequestType.CREATE, payload=payload) + pass + except ResourceNotFound: + logger.error("The collection where the resource is to be created does not exist.") + except AgentForwardingFailure as e: + raise e + except AttributeError: + # The object does not have a handler. + logger.debug(f"The object {object_type} does not have a custom handler") + pass + # 4. 
persist change in Sunfish tree + return self.storage_backend.write(payload_to_write) def get_aggregation_source(self, aggregation_source): try: @@ -349,21 +404,23 @@ def fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched): redfish_path = f'/redfish/v1/{"/".join(path_nodes[3:node_position + 1])}' logger.info(f"Checking redfish path: {redfish_path}") print(f"visit path {redfish_path} ?") - if redfish_path not in visited: # last path is always in visited queue! + if redfish_path not in visited: need_parent_prefetch = True logger.info(f"Inspect redfish path: {redfish_path}") print(f"adding redfish path to queue: {redfish_path}") queue.append(redfish_path) visited.append(redfish_path) - if need_parent_prefetch: # requeue + if need_parent_prefetch: # requeue this id and return 'None' queue.append(id) - else: # all parent objects have been visited + else: # all grand-parent objects have been visited + # go get this object from the aggregation_source redfish_obj = RedfishEventHandler.fetchResource(self, id, aggregation_source) fetched.append(id) return redfish_obj def fetchResource(self, obj_id, aggregation_source): - # only called if all parent objects have been put in queue and thus already fetched from aggregation_source + # only called if all grand-parent objects have been put in queue, sorted, inspected, and already fetched. + # The parent object, if not a collection, will also have already been fetched resource_endpoint = aggregation_source["HostName"] + obj_id logger.info(f"fetch: {resource_endpoint}") response = requests.get(resource_endpoint) @@ -371,6 +428,7 @@ def fetchResource(self, obj_id, aggregation_source): if response.status_code == 200: # Agent must have returned this object redfish_obj = response.json() + # now copy object into Sunfish inventory RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]: aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id']) @@ -383,7 +441,9 @@ def createInspectedObject(self,redfish_obj, aggregation_source): raise PropertyNotFound(f"missing @odata.id in \n {json.dumps(redfish_obj, indent=2)}") file_path = os.path.join(self.conf['redfish_root'], obj_path) + logger.debug(f"try creating object: {file_path}") + ''' if 'Collection' not in redfish_obj['@odata.type']: # re-write this to explicitly check for object's existence in Sunfish! 
try: @@ -398,6 +458,27 @@ def createInspectedObject(self,redfish_obj, aggregation_source): self.create_object(file_path, redfish_obj) else: logger.debug("This is a collection") + ''' + if 'Collection' in redfish_obj['@odata.type']: + logger.debug("This is a collection, ignore it until we need it") + pass + else: + if os.path.exists(file_path): + # check if existing Sunfish object is same as that being fetched from aggregation_source + # we have more work to do disambiguate duplicate names from different agents + # for now we will just check to be sure we are uploading an actual identical object + # (which shouldn't happen since we are adding in the Sunfish_RM details) + if self.get_object(file_path) == redfish_obj: + warnings.warn('Duplicate Resource found, ignored') + pass + elif self.get_object(file_path) != redfish_obj: + warnings.warn('Resource state changed') + # put object change checks and updates here + + else: # assume new object, create it and its parent collection if needed + add_aggregation_source_reference(redfish_obj, aggregation_source) + print(f"creating object: {file_path}") + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) def add_aggregation_source_reference(redfish_obj, aggregation_source): # BoundaryComponent = ["true", "false", "unknown"] diff --git a/sunfish_plugins/storage/file_system_backend/backend_FS.py b/sunfish_plugins/storage/file_system_backend/backend_FS.py index 5e9dc52..6d4c055 100644 --- a/sunfish_plugins/storage/file_system_backend/backend_FS.py +++ b/sunfish_plugins/storage/file_system_backend/backend_FS.py @@ -2,6 +2,7 @@ # This software is available to you under a BSD 3-Clause License. # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE +import pdb import json import logging import os @@ -185,7 +186,8 @@ def write(self, payload: dict): ''' def write(self, payload: dict): - """Checks if the Collection exists for that resource and stores the resource in the correct position of the file system. + """ + Checks if the Collection exists for that resource and stores the resource in the correct position of the file system. It create the directory of the resource, creates the index.json file and updates the files linked with the new resource (Collection members or Resources list). Args: @@ -202,19 +204,22 @@ def write(self, payload: dict): # get ID and collection from payload length = len(self.redfish_root) - id = payload['@odata.id'][length:] # id without redfish.root (es. /redfish/v1/) + redfishId = payload['@odata.id'][length:] # id without redfish.root (es. 
/redfish/v1/) + parent_is_collection = True # default assumption - print(f"BackendFS.write called on {id}") - id = id.split('/') + print(f"BackendFS.write called on {redfishId}") + id = redfishId.split('/') print(f"BackendFS.write split id == {id}") + #pdb.set_trace() for index in range(2, len(id[1:])): to_check = os.path.join('/'.join(id[:index]), 'index.json') to_check = os.path.join(os.getcwd(), self.root, to_check) print(f"BackendFS.write(): path to check: {to_check}") if os.path.exists(to_check) is False: - print("path to object directory (parent object) does not exist\n") + print("intermediate path above the parent object does not exist\n") raise ActionNotAllowed() + ''' with open(to_check, 'r') as data_json: data = json.load(data_json) data_json.close() @@ -242,28 +247,36 @@ def write(self, payload: dict): data_json.close() else: print("no element found???") + ''' - last_element = len(id) - 1 - collection_type = id[last_element - 1] - resource_id = id[last_element] + # we get here if all paths through grandparent object already exist + last_element = len(id) - 1 + collection_type = id[last_element - 1] # path element for parent object + resource_id = id[last_element] # actual redfish object to create full_collection = '' - # create the path of the full collection if it is a subcollection + # find the path of the parent object which is should NOT be assumed to be a 'Collection' if len(id) > 2: for i in range(0, last_element - 1): full_collection = full_collection + id[i] + '/' + print(f"backendFS.write: building full_collection: {full_collection}") collection_type = os.path.join(full_collection, collection_type) collection_path = os.path.join(os.getcwd(), self.root, - collection_type) # collection_path .../Resources/[folder], collection_type = [folder] + collection_type) print(f"backendFS.write: collection_path is {collection_path}") - parent_path = os.path.dirname(collection_path) # parent path .../Resources + parent_path = os.path.dirname(collection_path) + print(f"backendFS.write: parent_path is {parent_path}") - # check if the directory of the Collection already exists + # check if the directory of the parent object already exists + pdb.set_trace() if not os.path.exists(collection_path): - print(f"makinge collection path") + # if parent directory doesn't exist, we assume it is a collection and create the collection + print(f"backendFS.write: making collection path directory") os.makedirs(collection_path) + # the following line assumes the path element name dictates the collection type + # it is more proper to examine the @odata.type property of the object being created! config = utils.generate_collection(collection_type) # if the item to be written is managed by an agent, we want the collection containing it to also be marked @@ -276,28 +289,51 @@ def write(self, payload: dict): # config["Oem"] = {} # config["Oem"]["Sunfish_RM"] = payload["Oem"]["Sunfish_RM"] - ## write file Resources/[folder]/index.json + ## write file Resources/[folder]/index.json for the newly created Collection object with open(os.path.join(collection_path, "index.json"), "w") as fd: + print(f"backendFS.write: writing collection path object\n {json.dumps(config, indent=4)}") fd.write(json.dumps(config, indent=4, sort_keys=True)) fd.close() - # check if the index.json representing the collection exists. In case it doesnt it will create index.json with the collection template + # check if the index.json representing the parent collection exists. 
In case it doesnt it will create index.json with the collection template if os.path.exists(os.path.join(parent_path, "index.json")): + with open(os.path.join(parent_path, "index.json"), 'r') as parent_json: + parent_data = json.load(parent_json) + parent_json.close() + if 'Collection' in parent_data["@odata.type"]: + print("parent path is to a Collection\n") + print(f"members = {data['Members']}") + pass + collection_name = collection_type.split('/')[-1] + print(f"backendFS.write: updating parent collection at {os.path.join(parent_path, 'index.json')}") utils.update_collections_parent_json(path=os.path.join(parent_path, "index.json"), type=collection_name, link=self.redfish_root + collection_type) else: + # no parent collection, so we will make it up! utils.generate_collection(collection_type) else: + # parent object already exists, is it a Collection? # checks if there is already a resource with the same id index_path = os.path.join(collection_path, "index.json") - if utils.check_unique_id(index_path, payload['@odata.id']) is False: - raise AlreadyExists(payload['@odata.id']) + with open(index_path, 'r') as data_json: + parent_data = json.load(data_json) + data_json.close() + if 'Collection' in parent_data["@odata.type"]: + print("path is to a Collection\n") + if utils.check_unique_id(index_path, payload['@odata.id']) is False: + raise AlreadyExists(payload['@odata.id']) + pass + else: + print("path is to an object\n") + parent_is_collection = False # + pass # creates folder of the element and write index.json (assuming that the payload is valid i dont use any kind of template to write index.json) - folder_id_path = os.path.join(collection_path, resource_id) # .../Resources/[folder]/[id] + folder_id_path = os.path.join(collection_path, resource_id) # .../Resources/[folder]/[resource_id] - # creates the folder of the element + # if folder does not exist, check the parent path + # not sure we need this next check given we do the same above if not os.path.exists(folder_id_path): os.mkdir(folder_id_path) parent_path = os.path.join(*folder_id_path.split("/")[:-2]) @@ -309,6 +345,7 @@ def write(self, payload: dict): + print(f"backend_FS.write: writing {folder_id_path}/index.json") with open(os.path.join(folder_id_path, "index.json"), "w") as fd: fd.write(json.dumps(payload, indent=4, sort_keys=True)) fd.close() @@ -316,10 +353,12 @@ def write(self, payload: dict): json_collection_path = os.path.join(collection_path, 'index.json') # updates the collection with the new element created - if os.path.exists(json_collection_path): - utils.update_collections_json(path=json_collection_path, link=payload['@odata.id']) - else: - utils.generate_collection(collection_type) + if parent_is_collection: # need to insert new member into collection + if os.path.exists(json_collection_path): + utils.update_collections_json(path=json_collection_path, link=payload['@odata.id']) + else: + utils.generate_collection(collection_type) + pass # Events have to be handled in a different way. 
# To check if write() is called by an event subscription (EventDestination format) I check 'Destination' because From 7d4ca8e142fbf569e05b4562b0c63d8c3a84248c Mon Sep 17 00:00:00 2001 From: rherrell Date: Wed, 11 Sep 2024 19:33:43 -0600 Subject: [PATCH 12/28] more fixes in forward_to_agent logic Signed-off-by: rherrell --- .../sunfish_agent/agents_management.py | 37 +++- .../sunfish_agent/sunfish_agent_manager.py | 4 + .../storage/file_system_backend/backend_FS.py | 184 ++---------------- 3 files changed, 43 insertions(+), 182 deletions(-) diff --git a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py index 1cb00d5..3dd21a1 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/agents_management.py @@ -4,6 +4,7 @@ # The full license terms are available here: https://github.com/OpenFabrics/sunfish_library_reference/blob/main/LICENSE import json +import os import logging import string import requests @@ -36,21 +37,34 @@ def get_id(self) -> string: def is_agent_managed(cls, sunfish_core: 'sunfish.lib.core.Core', path: string): # if this is a top level resource, there's no need to check for the agent as no agent can own top level ones. # Example of top levels is Systems, Chassis, etc... - level = len(path.replace(sunfish_core.conf["redfish_root"], "").split("/")) + path_to_owner = (path.replace(sunfish_core.conf["redfish_root"], "").split("/")) + level = len(path_to_owner) if level == 1: return None + # path passed in is to parent object, which is usually a collection collection = sunfish_core.storage_backend.read(path) + if 'Collection' in collection["@odata.type"]: + print(f"parent obj {path} is a Collection.") + new_path = os.path.join('/'.join(path_to_owner[:-1])) + print(f"grandparent obj at {new_path}") + collection = sunfish_core.storage_backend.read(new_path) + logger.debug(f"Checking if the object {new_path} is managed by an Agent") + if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection["Oem"]["Sunfish_RM"]: + agent_path = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] + return Agent(sunfish_core, agent_path) + else: + # + logger.debug(f"Checking if the object {path} is managed by an Agent") + if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection["Oem"]["Sunfish_RM"]: + agent_path = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] + return Agent(sunfish_core, agent_path) - logger.debug(f"Checking if the object {path} is managed by an Agent") - if "Oem" in collection and "Sunfish_RM" in collection["Oem"] and "ManagingAgent" in collection["Oem"]["Sunfish_RM"]: - agent = collection["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] - return Agent(sunfish_core, agent) return None def _forward_get_request(self, path: string) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource GET request {resource_uri}") try: @@ -68,7 +82,8 @@ def _forward_get_request(self, path: string) -> dict: raise e def _forward_create_request(self, path: string, payload: dict) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path + #resource_uri = agent_uri+ "/" + path logger.debug(f"Forwarding resource CREATE request 
{resource_uri}") try: @@ -86,7 +101,7 @@ def _forward_create_request(self, path: string, payload: dict) -> dict: raise e def _forward_delete_request(self, path: string) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource DELETE request {resource_uri}") try: @@ -102,7 +117,7 @@ def _forward_delete_request(self, path: string) -> dict: raise e def _forward_patch_request(self, path: string, payload: dict) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource PATCH request {resource_uri}") try: @@ -122,7 +137,7 @@ def _forward_patch_request(self, path: string, payload: dict) -> dict: raise e def _forward_replace_request(self, path: string, payload: dict) -> dict: - resource_uri = self.aggregation_source["HostName"] + "/" + path + resource_uri = str(self.aggregation_source["HostName"]) + "/" + path logger.debug(f"Forwarding resource REPLACE request {resource_uri}") try: @@ -159,6 +174,8 @@ def forward_request(self, request: SunfishRequestType, path: string, payload: di if payload is None: logger.error("CREATE request payload missing") raise AgentForwardingFailure("CREATE", -1, "Missing payload") + #return self._forward_create_request(path, payload) + #return agents_management._forward_create_request(path, payload) return self._forward_create_request(path, payload) elif request == SunfishRequestType.DELETE: return self._forward_delete_request(path) diff --git a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py index a0ae67f..8bffc14 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py @@ -4,6 +4,7 @@ import logging import string from typing import Optional +import pdb import sunfish.lib.core from sunfish_plugins.objects_managers.sunfish_agent.agents_management import Agent @@ -39,9 +40,12 @@ def forward_to_manager(self, request_type: 'sunfish.models.types.SunfishRequestT path_to_check = "".join(f"/{e}" for e in path_elems) # get the parent path logger.debug(f"Checking managing agent for path: {path_to_check}") + pdb.set_trace() agent = Agent.is_agent_managed(self.core, path_to_check) if agent: logger.debug(f"{path} is managed by an agent, forwarding the request") + #agent_json = sunfish_core.storage_backend.read(agent) + #agent_uri = agent_json["Hostname"] try: agent_response = agent.forward_request(request_type, path, payload=payload) except AgentForwardingFailure as e: diff --git a/sunfish_plugins/storage/file_system_backend/backend_FS.py b/sunfish_plugins/storage/file_system_backend/backend_FS.py index 6d4c055..4d44313 100644 --- a/sunfish_plugins/storage/file_system_backend/backend_FS.py +++ b/sunfish_plugins/storage/file_system_backend/backend_FS.py @@ -46,7 +46,6 @@ def read(self, path: str) -> dict: except FileNotFoundError as e: raise ResourceNotFound(resource) - ''' def write(self, payload: dict): """Checks if the Collection exists for that resource and stores the resource in the correct position of the file system. It create the directory of the resource, creates the index.json file and updates the files linked with the new resource (Collection members or Resources list). 
@@ -54,7 +53,7 @@ def write(self, payload: dict): Args: payload (json): json representing the resource that should be stored. - Raisexs: + Raises: CollectionNotSupported: the storage of the collections is not supported. AlreadyExists: it is not possible to have duplicate resources with the same ID. @@ -66,6 +65,7 @@ def write(self, payload: dict): # get ID and collection from payload length = len(self.redfish_root) id = payload['@odata.id'][length:] # id without redfish.root (es. /redfish/v1/) + parent_is_collection = True # default assumption print(f"BackendFS.write called on {id}") id = id.split('/') @@ -76,7 +76,7 @@ def write(self, payload: dict): if os.path.exists(to_check) is False: print("path does not exist\n") raise ActionNotAllowed() - + ''' with open(to_check, 'r') as data_json: data = json.load(data_json) data_json.close() @@ -101,6 +101,8 @@ def write(self, payload: dict): json.dump(data, data_json, indent=4, sort_keys=True) data_json.close() + ''' + # we get here only if all grandparent objects exist last_element = len(id) - 1 collection_type = id[last_element - 1] resource_id = id[last_element] @@ -116,160 +118,8 @@ def write(self, payload: dict): collection_type) # collection_path .../Resources/[folder], collection_type = [folder] parent_path = os.path.dirname(collection_path) # parent path .../Resources - # check if the directory of the Collection already exists - if not os.path.exists(collection_path): - os.makedirs(collection_path) - - config = utils.generate_collection(collection_type) - - # if the item to be written is managed by an agent, we want the collection containing it to also be marked - # accordingly. We do this only for collections to be created because we assume that if the collection is - # there already: - # a. The collection is a first level one that is managed by Sunfish - # b. The collection was previously created during an agent discovery process and therefore already marked - # if "Oem" in payload and "Sunfish_RM" in payload["Oem"] and len(id) > 2 : - # if "Oem" not in config: - # config["Oem"] = {} - # config["Oem"]["Sunfish_RM"] = payload["Oem"]["Sunfish_RM"] - - ## write file Resources/[folder]/index.json - with open(os.path.join(collection_path, "index.json"), "w") as fd: - fd.write(json.dumps(config, indent=4, sort_keys=True)) - fd.close() - - # check if the index.json representing the collection exists. 
In case it doesnt it will create index.json with the collection template - if os.path.exists(os.path.join(parent_path, "index.json")): - collection_name = collection_type.split('/')[-1] - utils.update_collections_parent_json(path=os.path.join(parent_path, "index.json"), type=collection_name, - link=self.redfish_root + collection_type) - else: - utils.generate_collection(collection_type) - else: - # checks if there is already a resource with the same id - index_path = os.path.join(collection_path, "index.json") - if utils.check_unique_id(index_path, payload['@odata.id']) is False: - raise AlreadyExists(payload['@odata.id']) - - # creates folder of the element and write index.json (assuming that the payload is valid i dont use any kind of template to write index.json) - folder_id_path = os.path.join(collection_path, resource_id) # .../Resources/[folder]/[id] - - # creates the folder of the element - if not os.path.exists(folder_id_path): - os.mkdir(folder_id_path) - parent_path = os.path.join(*folder_id_path.split("/")[:-2]) - parent_json = "/" + os.path.join(parent_path, "index.json") - root_path = os.path.join(os.getcwd(), self.root) - if not os.path.exists(parent_json) and parent_path != root_path[1:]: - logger.warning( - "You should not be here, this is crating an entire path where multiple folders are not existing") - - - - with open(os.path.join(folder_id_path, "index.json"), "w") as fd: - fd.write(json.dumps(payload, indent=4, sort_keys=True)) - fd.close() - - json_collection_path = os.path.join(collection_path, 'index.json') - - # updates the collection with the new element created - if os.path.exists(json_collection_path): - utils.update_collections_json(path=json_collection_path, link=payload['@odata.id']) - else: - utils.generate_collection(collection_type) - - # Events have to be handled in a different way. - # To check if write() is called by an event subscription (EventDestination format) I check 'Destination' because - # it is the only required required property that other objects doesnt have - - logging.info('BackendFS: [POST] success') - return payload - - ''' - def write(self, payload: dict): - """ - Checks if the Collection exists for that resource and stores the resource in the correct position of the file system. - It create the directory of the resource, creates the index.json file and updates the files linked with the new resource (Collection members or Resources list). - - Args: - payload (json): json representing the resource that should be stored. - - Raises: - CollectionNotSupported: the storage of the collections is not supported. - AlreadyExists: it is not possible to have duplicate resources with the same ID. - - Returns: - json: stored data - """ - logging.info('new_BackendFS write called') - - # get ID and collection from payload - length = len(self.redfish_root) - redfishId = payload['@odata.id'][length:] # id without redfish.root (es. 
/redfish/v1/) - parent_is_collection = True # default assumption - - print(f"BackendFS.write called on {redfishId}") - id = redfishId.split('/') - print(f"BackendFS.write split id == {id}") #pdb.set_trace() - for index in range(2, len(id[1:])): - to_check = os.path.join('/'.join(id[:index]), 'index.json') - to_check = os.path.join(os.getcwd(), self.root, to_check) - print(f"BackendFS.write(): path to check: {to_check}") - if os.path.exists(to_check) is False: - print("intermediate path above the parent object does not exist\n") - raise ActionNotAllowed() - - ''' - with open(to_check, 'r') as data_json: - data = json.load(data_json) - data_json.close() - if 'Collection' in data["@odata.type"]: - print("path is to a Collection\n") - members = data["Members"] - for x in members: - if x["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])): - present = True - else: - print("path is to assumed list object") - if data[id[index]]: - element = data[id[index]] - print(f"element {id} found is {element}") - if type(element) is not list: - continue - for el in element: - if el["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])): - present = True - else: - el["@odata.id"] = os.path.join(self.redfish_root, '/'.join(id[:index + 1])) - print(f"BackendFS.write of {el['@odata.id']}") - with open(to_check, 'w') as data_json: - json.dump(data, data_json, indent=4, sort_keys=True) - data_json.close() - else: - print("no element found???") - ''' - - # we get here if all paths through grandparent object already exist - last_element = len(id) - 1 - collection_type = id[last_element - 1] # path element for parent object - resource_id = id[last_element] # actual redfish object to create - full_collection = '' - # find the path of the parent object which is should NOT be assumed to be a 'Collection' - if len(id) > 2: - for i in range(0, last_element - 1): - full_collection = full_collection + id[i] + '/' - print(f"backendFS.write: building full_collection: {full_collection}") - - collection_type = os.path.join(full_collection, collection_type) - - collection_path = os.path.join(os.getcwd(), self.root, - collection_type) - print(f"backendFS.write: collection_path is {collection_path}") - parent_path = os.path.dirname(collection_path) - print(f"backendFS.write: parent_path is {parent_path}") - - # check if the directory of the parent object already exists - pdb.set_trace() + # check if the directory of the Collection already exists if not os.path.exists(collection_path): # if parent directory doesn't exist, we assume it is a collection and create the collection print(f"backendFS.write: making collection path directory") @@ -289,31 +139,19 @@ def write(self, payload: dict): # config["Oem"] = {} # config["Oem"]["Sunfish_RM"] = payload["Oem"]["Sunfish_RM"] - ## write file Resources/[folder]/index.json for the newly created Collection object + ## write file Resources/[folder]/index.json with open(os.path.join(collection_path, "index.json"), "w") as fd: - print(f"backendFS.write: writing collection path object\n {json.dumps(config, indent=4)}") fd.write(json.dumps(config, indent=4, sort_keys=True)) fd.close() - # check if the index.json representing the parent collection exists. In case it doesnt it will create index.json with the collection template + # check if the index.json representing the collection exists. 
In case it doesnt it will create index.json with the collection template if os.path.exists(os.path.join(parent_path, "index.json")): - with open(os.path.join(parent_path, "index.json"), 'r') as parent_json: - parent_data = json.load(parent_json) - parent_json.close() - if 'Collection' in parent_data["@odata.type"]: - print("parent path is to a Collection\n") - print(f"members = {data['Members']}") - pass - collection_name = collection_type.split('/')[-1] - print(f"backendFS.write: updating parent collection at {os.path.join(parent_path, 'index.json')}") utils.update_collections_parent_json(path=os.path.join(parent_path, "index.json"), type=collection_name, link=self.redfish_root + collection_type) else: - # no parent collection, so we will make it up! utils.generate_collection(collection_type) else: - # parent object already exists, is it a Collection? # checks if there is already a resource with the same id index_path = os.path.join(collection_path, "index.json") with open(index_path, 'r') as data_json: @@ -329,6 +167,8 @@ def write(self, payload: dict): parent_is_collection = False # pass + + # creates folder of the element and write index.json (assuming that the payload is valid i dont use any kind of template to write index.json) folder_id_path = os.path.join(collection_path, resource_id) # .../Resources/[folder]/[resource_id] @@ -341,11 +181,11 @@ def write(self, payload: dict): root_path = os.path.join(os.getcwd(), self.root) if not os.path.exists(parent_json) and parent_path != root_path[1:]: logger.warning( - "You should not be here, this is crating an entire path where multiple folders are not existing") + "You should not be here, this is creating an entire path with multiple missing grandparents") - print(f"backend_FS.write: writing {folder_id_path}/index.json") + logger.info(f"backend_FS.write: writing {folder_id_path}/index.json") with open(os.path.join(folder_id_path, "index.json"), "w") as fd: fd.write(json.dumps(payload, indent=4, sort_keys=True)) fd.close() From 9bbf979a025be0c874bc148bf61bf85d2b7fdf84 Mon Sep 17 00:00:00 2001 From: rherrell Date: Fri, 20 Sep 2024 12:49:51 -0600 Subject: [PATCH 13/28] TriggerEvent handler WIP Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 85 +++++++++++++++---- .../sunfish_agent/sunfish_agent_manager.py | 2 +- 2 files changed, 69 insertions(+), 18 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index c13beb2..12475cc 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -8,7 +8,7 @@ import warnings import shutil from uuid import uuid4 - +import pdb import requests from sunfish.events.event_handler_interface import EventHandlerInterface @@ -126,27 +126,76 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context # logger.info("TriggerEvent method called") file_to_send = event['MessageArgs'][0] # relative Resource Path - hostname = event['MessageArgs'][1] # Agent address - initiator = event['OriginOfCondition']['@odata.id'] - logger.info(f"file_to_send path is {file_to_send}") + #file_path = os.path.join(self.conf['redfish_root'], file_to_send) + hostname = event['MessageArgs'][1] # target address + destination = hostname + "/EventListener" # may match a Subscription object's 'Destination' property + logger.debug(f"path of file_to_send is {file_to_send}") try: - if 
os.path.exists('file_to_send'): - print("found the event file") - # event_to_send = contents of file_to_send - - # these lines are not yet correct!! - # send the event as a POST to the EventListener - #response = requests.post(f"{hostname}/EventListener",event_to_send) - #if response.status_code != 200: - # raise Exception("Cannot find ConnectionMethod") - #response = response.json() - - resp = 204 + if os.path.exists(file_to_send): + with open(file_to_send, 'r') as data_json: + event_to_send = json.load(data_json) + data_json.close() + + logger.debug("found the event file") + + if event_to_send["Context"] == "": + logger.debug("no context in template event") + # don't fill it in, send the NULL + pass + elif event_to_send["Context"] == "None": + logger.debug("template event uses subscriber assigned Context") + # check if the Destination for this event is a registered subscriber + # use as "Context" of this the event_to_send, or use NULL if not found + event_to_send["Context"] = RedfishEventHandler.find_subscriber_context(destination) + #event_to_send["Context"] = "" #just fake it for now + pass + + logger.debug(f"event_to_send\n {event_to_send}" ) + try: + # send the event as a POST to the EventListener + response = requests.post(destination,json=event_to_send) + if response.status_code != 200: + logger.debug(f"Destination returned code {response.status_code}") + return response + else: + logger.info(f"TriggerEvents Succeeded: code {response.status_code}") + return response + except Exception: + raise Exception(f"Event forwarding to destination {destination} failed.") + response = 500 + return response + + else: + logger.error(f"file not found: {file_to_send} ") + response = 404 + return response except Exception: raise Exception("TriggerEvents Failed") resp = 500 - return resp + return resp + + + def find_subscriber_context(destination): + # look up the subscriber's "Context" for the given event Destination + pdb.set_trace() + context = "" + try: + subscribers_list = event_handler.core.storage_backend.read( + os.path.join(self.core.conf["redfish_root"], + "EventService", "Subscriptions") + ) + print(f"subscribers: {subscribers}") + for member in subscribers_list['Members']: + print(f"checking {member}") + subscriber = event_handler.core.storage_backend.read(member["@odata.id"]) + if subscriber['Destination'] == destination: + context=subscriber['Context'] + logger.info(f"Found matching Destination in {member}") + except Exception: + logger.info(f"failed to find a matching Destination") + + return context @@ -479,6 +528,7 @@ def createInspectedObject(self,redfish_obj, aggregation_source): add_aggregation_source_reference(redfish_obj, aggregation_source) print(f"creating object: {file_path}") RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + def add_aggregation_source_reference(redfish_obj, aggregation_source): # BoundaryComponent = ["true", "false", "unknown"] @@ -513,3 +563,4 @@ def add_aggregation_source_reference(redfish_obj, aggregation_source): } if "BoundaryComponent" not in redfish_obj["Oem"]["Sunfish_RM"]: redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] = oem["BoundaryComponent"] + diff --git a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py index 8bffc14..3b1d38c 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py @@ -40,7 +40,7 @@ def 
forward_to_manager(self, request_type: 'sunfish.models.types.SunfishRequestT path_to_check = "".join(f"/{e}" for e in path_elems) # get the parent path logger.debug(f"Checking managing agent for path: {path_to_check}") - pdb.set_trace() + #pdb.set_trace() agent = Agent.is_agent_managed(self.core, path_to_check) if agent: logger.debug(f"{path} is managed by an agent, forwarding the request") From 5b537979566d1ef74602a9d1ae77db4de2b3bf11 Mon Sep 17 00:00:00 2001 From: rherrell Date: Tue, 24 Sep 2024 18:02:36 -0600 Subject: [PATCH 14/28] fixed another bug in uploading existing objects Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 55 ++++++++++--------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 12475cc..2a87a40 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -146,8 +146,7 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context logger.debug("template event uses subscriber assigned Context") # check if the Destination for this event is a registered subscriber # use as "Context" of this the event_to_send, or use NULL if not found - event_to_send["Context"] = RedfishEventHandler.find_subscriber_context(destination) - #event_to_send["Context"] = "" #just fake it for now + event_to_send["Context"] = RedfishEventHandler.find_subscriber_context(event_handler.core, destination) pass logger.debug(f"event_to_send\n {event_to_send}" ) @@ -175,29 +174,7 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context return resp - def find_subscriber_context(destination): - # look up the subscriber's "Context" for the given event Destination - pdb.set_trace() - context = "" - try: - subscribers_list = event_handler.core.storage_backend.read( - os.path.join(self.core.conf["redfish_root"], - "EventService", "Subscriptions") - ) - print(f"subscribers: {subscribers}") - for member in subscribers_list['Members']: - print(f"checking {member}") - subscriber = event_handler.core.storage_backend.read(member["@odata.id"]) - if subscriber['Destination'] == destination: - context=subscriber['Context'] - logger.info(f"Found matching Destination in {member}") - - except Exception: - logger.info(f"failed to find a matching Destination") - - return context - - + class RedfishEventHandler(EventHandlerInterface): dispatch_table = { @@ -329,6 +306,31 @@ def check_subdirs(self, origin): return to_forward + def find_subscriber_context(self, destination): + # look up the subscriber's "Context" for the given event Destination + pdb.set_trace() + context = "" + try: + #subscribers_list = event_handler.core.storage_backend.read( + #subscribers_list = self.core.storage_backend.read( + subscribers_list = self.storage_backend.read( + os.path.join(self.conf["redfish_root"], + "EventService", "Subscriptions") + ) + logger.debug(f"subscribers: {subscribers_list}") + for member in subscribers_list['Members']: + logger.debug(f"checking {member}") + subscriber = self.storage_backend.read(member["@odata.id"]) + if subscriber['Destination'] == destination: + context=subscriber['Context'] + logger.info(f"Found matching Destination in {member}") + + except Exception: + logger.info(f"failed to find a matching Destination") + + return context + + def bfsInspection(self, node, aggregation_source): queue = [] visited = [] 
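For clarity, the subscriber lookup performed by find_subscriber_context() above can be sketched against a plain in-memory list instead of the storage backend. The subscription values below are invented for the example; only the 'Destination' and 'Context' properties of a Redfish EventDestination are assumed.

def find_context(subscriptions, destination):
    # Same matching rule as find_subscriber_context(): compare each subscriber's
    # 'Destination' with the target listener URL and return that subscriber's 'Context'.
    for sub in subscriptions:
        if sub.get("Destination") == destination:
            return sub.get("Context", "")
    return ""  # fall back to an empty context when no subscriber matches

subs = [{"Destination": "http://agent.example:8081/EventListener",
         "Context": "agent-context-1"}]
print(find_context(subs, "http://agent.example:8081/EventListener"))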
@@ -512,7 +514,8 @@ def createInspectedObject(self,redfish_obj, aggregation_source): logger.debug("This is a collection, ignore it until we need it") pass else: - if os.path.exists(file_path): + fs_full_path = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_root"], obj_path, 'index.json') + if os.path.exists(fs_full_path): # check if existing Sunfish object is same as that being fetched from aggregation_source # we have more work to do disambiguate duplicate names from different agents # for now we will just check to be sure we are uploading an actual identical object From ff38b09e255924474a8232d8dcf83df95121ec01 Mon Sep 17 00:00:00 2001 From: rherrell Date: Wed, 2 Oct 2024 08:06:18 -0600 Subject: [PATCH 15/28] added check for duplicate resource in CreateResource event handler Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 2a87a40..da7eded 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -78,6 +78,7 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event @classmethod def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, context: str): # incoming context (an aggregation_source ID) comes from event sender + pdb.set_trace() if context == "": raise PropertyNotFound("Missing agent context in ResourceCreated event") @@ -86,15 +87,17 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont id = event['OriginOfCondition']['@odata.id'] # ex: /redfish/v1/Fabrics/CXL logger.info(f"aggregation_source's redfish URI: {id}") # must have an aggregation_source object to assign as owner of new resource - aggregation_source = event_handler.core.storage_backend.read( - os.path.join(event_handler.core.conf["redfish_root"], + agg_src_path = os.path.join(os.getcwd(), event_handler.core.conf["backend_conf"]["fs_root"], "AggregationService", "AggregationSources", context) - ) + if os.path.exists(agg_src_path): + aggregation_source = event_handler.core.storage_backend.read(agg_src_path) + else: + raise PropertyNotFound("Cannot find aggregation source; file does not exist") hostname = aggregation_source["HostName"] response = requests.get(f"{hostname}/{id}") if response.status_code != 200: - raise Exception("Cannot find new resource at aggregation_source") + raise ResourceNotFound("Aggregation source read from Agent failed") response = response.json() print(f"new resource is \n") print(json.dumps(response, indent=4)) @@ -110,9 +113,21 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont #event_handler.core.storage_backend.write(response) #event_handler.core.create_object(id, response) - RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) + # New resource should not exist in Sunfish inventory + length = len(event_handler.core.conf["redfish_root"]) + resource = response["@odata.id"][length:] + fs_full_path = os.path.join(os.getcwd(), event_handler.core.conf["backend_conf"]["fs_root"], + resource, 'index.json') + if not os.path.exists(fs_full_path): + RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) + else: # for now, we will not process the new resource + logger.error(f"resource to create {id} 
already exists.") + raise AlreadyExists(id) + + # patch the aggregation_source in storage with all the new resources found event_handler.core.storage_backend.patch(id, aggregation_source) + return 200 @classmethod def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context: str): @@ -130,6 +145,7 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context hostname = event['MessageArgs'][1] # target address destination = hostname + "/EventListener" # may match a Subscription object's 'Destination' property logger.debug(f"path of file_to_send is {file_to_send}") + pdb.set_trace() try: if os.path.exists(file_to_send): with open(file_to_send, 'r') as data_json: From ce02983c16bd70e8d6ffe785570a4002c776d278 Mon Sep 17 00:00:00 2001 From: rherrell Date: Thu, 17 Oct 2024 16:55:34 -0600 Subject: [PATCH 16/28] have working renamer -- no translators yet Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 149 ++++++++++++++++-- 1 file changed, 133 insertions(+), 16 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index da7eded..76bed82 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -81,6 +81,13 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont pdb.set_trace() if context == "": raise PropertyNotFound("Missing agent context in ResourceCreated event") + # put the global definition and initial loading of sunfishAliasDB dictionary here + # sunfishAliasDB contains renaming data, the alias xref array, the boundaryLink + # data, and assorted flags that are used during upload renaming and final merge of + # boundary components based on boundary links. + + # + # logger.info("New resource created") @@ -121,12 +128,19 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont if not os.path.exists(fs_full_path): RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) else: # for now, we will not process the new resource - logger.error(f"resource to create {id} already exists.") + logger.error(f"resource to create: {id} already exists.") + # eventually we need to resolve the URI conflict by checking that the + # aggregation_source of the existing obj is the same aggregation_source + # which just sent this CreateResource event, making this a duplicate attempt. + # if this is a different aggregation_source, we have a naming conflict + # to handle inside the createInspectedObject() routine raise AlreadyExists(id) - # patch the aggregation_source in storage with all the new resources found + # patch the aggregation_source object in storage with all the new resources found event_handler.core.storage_backend.patch(id, aggregation_source) + # before we are done, we have to process all renamed paths from this aggregation_source. 
+ # Need to call the updateUploadedObjectPaths() utility return 200 @classmethod @@ -208,6 +222,7 @@ def __init__(self, core): self.core = core self.redfish_root = core.conf["redfish_root"] self.fs_root = core.conf["backend_conf"]["fs_root"] + self.fs_SunfishPrivate = core.conf["backend_conf"]["fs_private"] self.subscribers_root = core.conf["backend_conf"]["subscribers_root"] self.backend = core.storage_backend @classmethod @@ -495,8 +510,8 @@ def fetchResource(self, obj_id, aggregation_source): if response.status_code == 200: # Agent must have returned this object redfish_obj = response.json() - # now copy object into Sunfish inventory - RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) + # now rename if necessary and copy object into Sunfish inventory + redfish_obj = RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]: aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id']) return redfish_obj @@ -508,7 +523,7 @@ def createInspectedObject(self,redfish_obj, aggregation_source): raise PropertyNotFound(f"missing @odata.id in \n {json.dumps(redfish_obj, indent=2)}") file_path = os.path.join(self.conf['redfish_root'], obj_path) - logger.debug(f"try creating object: {file_path}") + logger.debug(f"try creating agent-named object: {file_path}") ''' if 'Collection' not in redfish_obj['@odata.type']: @@ -530,27 +545,128 @@ def createInspectedObject(self,redfish_obj, aggregation_source): logger.debug("This is a collection, ignore it until we need it") pass else: + # obj_path is the Agent-proposed path name, but we need to search for the Sunfish (aliased) name + # obj_path = isAgentURI_Aliased(self,obj_path,aggregation_source) fs_full_path = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_root"], obj_path, 'index.json') if os.path.exists(fs_full_path): - # check if existing Sunfish object is same as that being fetched from aggregation_source - # we have more work to do disambiguate duplicate names from different agents - # for now we will just check to be sure we are uploading an actual identical object - # (which shouldn't happen since we are adding in the Sunfish_RM details) - if self.get_object(file_path) == redfish_obj: - warnings.warn('Duplicate Resource found, ignored') - pass - elif self.get_object(file_path) != redfish_obj: - warnings.warn('Resource state changed') - # put object change checks and updates here + uploading_agent_uri= aggregation_source["@odata.id"] + existing_obj = self.get_object(file_path) + existing_agent_uri = existing_obj["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] + print(f"managingAgent of Sunfish {obj_path} is {uploading_agent_uri}") + if existing_agent_uri == uploading_agent_uri: + # we have a duplicate posting of the object from same agent + # check if existing Sunfish object is same as that being fetched from aggregation_source + # Need to ignore the Sunfish_RM structure in the compare + # Thus, the following isn't completely correct + # note we don't update the object (for now) + if self.get_object(file_path) == redfish_obj: + # (which shouldn't happen since we are adding in the Sunfish_RM details) + warnings.warn('Duplicate Resource found, ignored') + pass + elif self.get_object(file_path) != redfish_obj: + warnings.warn('Resource state changed') + # put object change checks and updates here + else: + # we may have a naming conflict between agents + if 
redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] != "foreign": + # we have a simple name conflict + # find new name, build xref + redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) + # for now use original naming + add_aggregation_source_reference(redfish_obj, aggregation_source) + print(f"creating renamed object: {file_path}") + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + else: + # we have a placeholder or boundary link component to process + # put in placeholder codes here + print(f"Non-owned component {obj_path} uploaded, ignored") + #add_aggregation_source_reference(redfish_obj, aggregation_source) + #print(f"creating renamed object: {file_path}") + #RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + + else: # assume new object, create it and its parent collection if needed add_aggregation_source_reference(redfish_obj, aggregation_source) print(f"creating object: {file_path}") RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + + return redfish_obj + def renameUploadedObject(self,redfish_obj, aggregation_source): + # redfish_obj uses agent namespace + # aggregation_source is an object in the Sunfish namespace + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + print(json.dumps(uri_aliasDB, indent = 4)) + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + print(json.dumps(redfish_obj, indent=2)) + agentGiven_obj_path = redfish_obj['@odata.id'] + agentGiven_segments = agentGiven_obj_path.split("/") + agentGiven_obj_name = agentGiven_segments[-1] + #agentGiven_tree_segments = os.path.relpath(redfish_obj['@odata.id'], self.conf['redfish_root']).split("/") + print(f"agentGiven tree: {agentGiven_segments}") + #agent_file_path = os.path.join(self.conf['redfish_root'], agent_obj_path, 'index.json') + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + # generate a new path and object name + logger.debug(f"renaming object: {agentGiven_obj_path}") + logger.debug(f"agent id: {owning_agent_id}") + sunfishGiven_obj_name = "Sunfish_"+owning_agent_id[:4]+"_"+agentGiven_obj_name + sunfishGiven_obj_path = "/" + for i in range(1,len(agentGiven_segments)-1): + print(agentGiven_segments[i]) + sunfishGiven_obj_path=sunfishGiven_obj_path + agentGiven_segments[i]+"/" + sunfishGiven_obj_path=sunfishGiven_obj_path + sunfishGiven_obj_name + # need to check new name is also unused + if sunfishGiven_obj_path in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: + # new name was still not unique, just brute force it! 
+ temp_string = "Sunfish_"+owning_agent_id+"_"+agentGiven_obj_name + sunfishGiven_obj_path=sunfishGiven_obj_path.replace(sunfishGiven_obj_name,temp_string) + + # + print(sunfishGiven_obj_path) + redfish_obj['@odata.id'] = sunfishGiven_obj_path + if redfish_obj['Id'] == agentGiven_obj_name: + redfish_obj['Id'] = sunfishGiven_obj_name + print(json.dumps(redfish_obj, indent=2)) + # now need to update aliasDB + new_alias = {} + new_alias[agentGiven_obj_path] = sunfishGiven_obj_path + if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = [] + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"].append(new_alias) + print(json.dumps(uri_aliasDB, indent=2)) + else: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"].append(new_alias) + print(json.dumps(uri_aliasDB, indent=2)) + + if sunfishGiven_obj_path not in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfishGiven_obj_path] = [] + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfishGiven_obj_path].append(agentGiven_obj_path) + else: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfishGiven_obj_path].append(agentGiven_obj_path) + + # now need to write aliasDB back to file + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + + return redfish_obj def add_aggregation_source_reference(redfish_obj, aggregation_source): - # BoundaryComponent = ["true", "false", "unknown"] + # BoundaryComponent = ["owned", "foreign", "non-boundary","unknown"] oem = { "@odata.type": "#SunfishExtensions.v1_0_0.ResourceExtensions", "ManagingAgent": { @@ -558,6 +674,7 @@ def add_aggregation_source_reference(redfish_obj, aggregation_source): }, "BoundaryComponent": "unknown" } + print(f"checking Oem field of {json.dumps(redfish_obj, indent=4)}") if "Oem" not in redfish_obj: redfish_obj["Oem"] = {"Sunfish_RM": oem} elif "Sunfish_RM" not in redfish_obj["Oem"]: From 607c258f029a39962af9384465c3d4e565c7c2f9 Mon Sep 17 00:00:00 2001 From: rherrell Date: Mon, 21 Oct 2024 18:40:04 -0600 Subject: [PATCH 17/28] Agent upload collisions renamed, aliasDB now updated Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 125 ++++++++++++++++-- .../storage/file_system_backend/backend_FS.py | 2 +- 2 files changed, 114 insertions(+), 13 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 76bed82..60e756d 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -379,7 +379,7 @@ def handleNestedObject(self, obj): if type(obj) == dict: for key,value in obj.items(): if key == '@odata.id': - print(f"found URL to Redfish obj {value}") + print(f"found nested URL to Redfish obj {value}") RedfishEventHandler.handleEntryIfNotVisited(self, value, visited, queue) elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): handleNestedObject(self, value) # need to ignore Sunfish_RM paths; they are wrong namespace @@ -398,8 +398,8 @@ def handleNestedObject(self, obj): for key, val in redfish_obj.items(): if key == '@odata.id': - RedfishEventHandler.handleEntryIfNotVisited(self, val, visited, queue) - print(f"found URL to Redfish obj {val}") + #RedfishEventHandler.handleEntryIfNotVisited(self, val, visited, queue) 
+ print(f"ignored top-level @odata.id to Redfish obj {val}") pass #elif key == 'Links': # if type(val)==dict or type(val)==list: @@ -485,17 +485,18 @@ def fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched): for node_position in range(4, len(path_nodes) - 1): redfish_path = f'/redfish/v1/{"/".join(path_nodes[3:node_position + 1])}' logger.info(f"Checking redfish path: {redfish_path}") - print(f"visit path {redfish_path} ?") + print(f"do we need to visit path {redfish_path} ?") if redfish_path not in visited: need_parent_prefetch = True logger.info(f"Inspect redfish path: {redfish_path}") - print(f"adding redfish path to queue: {redfish_path}") + print(f"yes, adding redfish path to queue: {redfish_path}") queue.append(redfish_path) visited.append(redfish_path) if need_parent_prefetch: # requeue this id and return 'None' queue.append(id) else: # all grand-parent objects have been visited # go get this object from the aggregation_source + print(f"fetchResourceAndTree fetching object {id}") redfish_obj = RedfishEventHandler.fetchResource(self, id, aggregation_source) fetched.append(id) return redfish_obj @@ -509,6 +510,7 @@ def fetchResource(self, obj_id, aggregation_source): if response.status_code == 200: # Agent must have returned this object redfish_obj = response.json() + print(f"successfully fetched {obj_id}") # now rename if necessary and copy object into Sunfish inventory redfish_obj = RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) @@ -545,9 +547,21 @@ def createInspectedObject(self,redfish_obj, aggregation_source): logger.debug("This is a collection, ignore it until we need it") pass else: - # obj_path is the Agent-proposed path name, but we need to search for the Sunfish (aliased) name - # obj_path = isAgentURI_Aliased(self,obj_path,aggregation_source) + # @odata.id is the Agent-proposed path name, but we need to search for the Sunfish (aliased) name + agent_redfish_URI = redfish_obj['@odata.id'] + sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, agent_redfish_URI, aggregation_source) + # if Sunfish has aliased the object URI, we need to update the object before we write it! 
+ if agent_redfish_URI != sunfish_aliased_URI: + redfish_obj['@odata.id'] = sunfish_aliased_URI + RedfishEventHandler.updateSunfishAliases(self, sunfish_aliased_URI, agent_redfish_URI, aggregation_source) + if redfish_obj['Id'] == agent_redfish_URI.split("/")[-1]: + redfish_obj['Id'] = sunfish_aliased_URI.split("/")[-1] + print(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") + # use Sunfish (aliased) paths for conflict testing if it exists + obj_path = os.path.relpath(sunfish_aliased_URI, self.conf['redfish_root']) fs_full_path = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_root"], obj_path, 'index.json') + file_path = os.path.join(self.conf['redfish_root'], obj_path) + if os.path.exists(fs_full_path): uploading_agent_uri= aggregation_source["@odata.id"] existing_obj = self.get_object(file_path) @@ -593,9 +607,98 @@ def createInspectedObject(self,redfish_obj, aggregation_source): return redfish_obj + def xlateToSunfishPath(self,agent_path, aggregation_source): + # redfish_obj uses agent namespace + # aggregation_source is an object in the Sunfish namespace + # will eventually replace file read & load of aliasDB with aliasDB passed in as arg + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + print(json.dumps(uri_aliasDB, indent = 4)) + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + print(f"xlate {agent_path} to Sunfish path") + agentGiven_segments = agent_path.split("/") + print(f"agentGiven tree: {agentGiven_segments}") + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + logger.debug(f"agent id: {owning_agent_id}") + # check if owning_agent has any aliases assigned + if owning_agent_id in uri_aliasDB["Agents_xref_URIs"]: + logger.debug(f"xlating Agent path : {agent_path}") + print(f"xlating Agent path : {agent_path}") + agentFinal_obj_path = "" + for i in range(1,len(agentGiven_segments)): + print(agentGiven_segments[i]) + agentFinal_obj_path=agentFinal_obj_path +"/"+ agentGiven_segments[i] + print(f"agentFinal_obj_path is {agentFinal_obj_path}") + # test this path segment + print("test this path segment") + print( agentFinal_obj_path in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"]) + if agentFinal_obj_path in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"]: + # need to replace agent_path built to this point with sunfish alias + print(f"found an alias for {agentFinal_obj_path}") + sunfishAliasPath = uri_aliasDB["Agents_xref_URIs"][owning_agent_id] \ + ["aliases"][agentFinal_obj_path] + agentFinal_obj_path = sunfishAliasPath + print(f"aliased path is {agentFinal_obj_path}") + # next segment + agent_path = agentFinal_obj_path + return agent_path + + + def updateSunfishAliases(self,sunfish_URI, agent_URI, aggregation_source): + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + print(json.dumps(uri_aliasDB, indent = 4)) + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + owning_agent_id = 
aggregation_source["@odata.id"].split("/")[-1] + logger.debug(f"updating aliases for : {owning_agent_id}") + if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agent_URI]=sunfish_URI + print(json.dumps(uri_aliasDB, indent=2)) + else: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agent_URI]=sunfish_URI + print(json.dumps(uri_aliasDB, indent=2)) + + if sunfish_URI not in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfish_URI] = [] + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfish_URI].append(agent_URI) + else: + uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfish_URI].append(agent_URI) + + # now need to write aliasDB back to file + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + + return uri_aliasDB + def renameUploadedObject(self,redfish_obj, aggregation_source): # redfish_obj uses agent namespace # aggregation_source is an object in the Sunfish namespace + # this routine ONLY renames the @Odata.id and "id" try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') if os.path.exists(uri_alias_file): @@ -641,15 +744,13 @@ def renameUploadedObject(self,redfish_obj, aggregation_source): redfish_obj['Id'] = sunfishGiven_obj_name print(json.dumps(redfish_obj, indent=2)) # now need to update aliasDB - new_alias = {} - new_alias[agentGiven_obj_path] = sunfishGiven_obj_path if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = {} - uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = [] - uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"].append(new_alias) + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = {} + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agentGiven_obj_path]=sunfishGiven_obj_path print(json.dumps(uri_aliasDB, indent=2)) else: - uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"].append(new_alias) + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agentGiven_obj_path]=sunfishGiven_obj_path print(json.dumps(uri_aliasDB, indent=2)) if sunfishGiven_obj_path not in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: diff --git a/sunfish_plugins/storage/file_system_backend/backend_FS.py b/sunfish_plugins/storage/file_system_backend/backend_FS.py index 4d44313..998c076 100644 --- a/sunfish_plugins/storage/file_system_backend/backend_FS.py +++ b/sunfish_plugins/storage/file_system_backend/backend_FS.py @@ -158,7 +158,7 @@ def write(self, payload: dict): parent_data = json.load(data_json) data_json.close() if 'Collection' in parent_data["@odata.type"]: - print("path is to a Collection\n") + print("parent path is to a Collection\n") if utils.check_unique_id(index_path, payload['@odata.id']) is False: raise AlreadyExists(payload['@odata.id']) pass From 748baaa83a98a84df90cfc5a029590d792a2dd7b Mon Sep 17 00:00:00 2001 From: rherrell Date: Fri, 25 Oct 2024 13:13:02 -0600 Subject: [PATCH 18/28] update of an objects aliased links now works, extra prints still included Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 114 ++++++++++++++++-- 1 file changed, 101 insertions(+), 13 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py 
b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 60e756d..399bd40 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -342,8 +342,6 @@ def find_subscriber_context(self, destination): pdb.set_trace() context = "" try: - #subscribers_list = event_handler.core.storage_backend.read( - #subscribers_list = self.core.storage_backend.read( subscribers_list = self.storage_backend.read( os.path.join(self.conf["redfish_root"], "EventService", "Subscriptions") @@ -412,6 +410,11 @@ def handleNestedObject(self, obj): print(json.dumps(sorted(fetched),indent = 4)) print("\n\nAgent did not return objects for the following URIs:\n") print(json.dumps(sorted(notfound),indent = 4)) + + # now need to revisit all uploaded objects and update any links renamed after + # the uploaded object was written + RedfishEventHandler.updateAllAliasedLinks(self,aggregation_source) + return visited #why not the 'fetched' list? def create_uploaded_object(self, path: str, payload: dict): @@ -543,20 +546,22 @@ def createInspectedObject(self,redfish_obj, aggregation_source): else: logger.debug("This is a collection") ''' + agent_redfish_URI = redfish_obj['@odata.id'] + sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, agent_redfish_URI, aggregation_source) + # @odata.id is the Agent-proposed path name, but we need to search for the Sunfish (aliased) name. + # becomes part of xlateToSunfishObj(self, agent_obj,aggregation_source) -> translated_agent_obj + # if Sunfish has aliased the object URI, we need to update the object before we write it! + if agent_redfish_URI != sunfish_aliased_URI: + redfish_obj['@odata.id'] = sunfish_aliased_URI + RedfishEventHandler.updateSunfishAliasDB(self, sunfish_aliased_URI, agent_redfish_URI, aggregation_source) + if 'Id' in redfish_obj: + if redfish_obj['Id'] == agent_redfish_URI.split("/")[-1]: + redfish_obj['Id'] = sunfish_aliased_URI.split("/")[-1] + print(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") if 'Collection' in redfish_obj['@odata.type']: logger.debug("This is a collection, ignore it until we need it") pass else: - # @odata.id is the Agent-proposed path name, but we need to search for the Sunfish (aliased) name - agent_redfish_URI = redfish_obj['@odata.id'] - sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, agent_redfish_URI, aggregation_source) - # if Sunfish has aliased the object URI, we need to update the object before we write it! 
- if agent_redfish_URI != sunfish_aliased_URI: - redfish_obj['@odata.id'] = sunfish_aliased_URI - RedfishEventHandler.updateSunfishAliases(self, sunfish_aliased_URI, agent_redfish_URI, aggregation_source) - if redfish_obj['Id'] == agent_redfish_URI.split("/")[-1]: - redfish_obj['Id'] = sunfish_aliased_URI.split("/")[-1] - print(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") # use Sunfish (aliased) paths for conflict testing if it exists obj_path = os.path.relpath(sunfish_aliased_URI, self.conf['redfish_root']) fs_full_path = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_root"], obj_path, 'index.json') @@ -589,11 +594,13 @@ def createInspectedObject(self,redfish_obj, aggregation_source): # for now use original naming add_aggregation_source_reference(redfish_obj, aggregation_source) print(f"creating renamed object: {file_path}") + logger.info(f"creating renamed object: {file_path}") RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) else: # we have a placeholder or boundary link component to process # put in placeholder codes here print(f"Non-owned component {obj_path} uploaded, ignored") + logger.info(f"Non-owned component {obj_path} uploaded, ignored") #add_aggregation_source_reference(redfish_obj, aggregation_source) #print(f"creating renamed object: {file_path}") #RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) @@ -654,8 +661,89 @@ def xlateToSunfishPath(self,agent_path, aggregation_source): agent_path = agentFinal_obj_path return agent_path + def updateAllAliasedLinks(self,aggregation_source): + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + print(json.dumps(uri_aliasDB, indent = 4)) + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + logger.debug(f"updating all objects for : {owning_agent_id}") + + agent_uploads=[] + # for every aggregation_source: + if owning_agent_id in uri_aliasDB['Agents_xref_URIs']: + # grab the k,v aliases structure and the list of URIs for owned objects + if 'aliases' in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]: + agent_aliases = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['aliases'] + agent_uploads = aggregation_source["Links"]["ResourcesAccessed"] + + # update all the objects + for upload_obj_URI in agent_uploads: + logger.info(f"updating links in obj: {upload_obj_URI}") + print(f"updating links in obj: {upload_obj_URI}") + RedfishEventHandler.updateObjectAliasedLinks(self, upload_obj_URI, agent_aliases) + + return + + def updateObjectAliasedLinks(self, object_URI, agent_aliases): + + def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): + #pdb.set_trace() + nestedPaths = [] + if type(obj) == list: + i = 0; + for entry in obj: + if type(entry) == list or type(entry) == dict: + nestedPaths.extend( findNestedURIs(self, URI_to_match, URI_to_sub, entry, path_to_nested_URI+"["+str(i)+"]")) + else: + i=i+1 + if type(obj) == dict: + for key,value in obj.items(): + if key == '@odata.id'and path_to_nested_URI != "": + # check @odata.id: value for an alias + if value == URI_to_match: + print(f"---- alias found at {path_to_nested_URI}") + print(f"---- modifying {value} to {URI_to_sub}") + obj[key] = URI_to_sub + 
nestedPaths.append(path_to_nested_URI) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + nestedPaths.extend(findNestedURIs(self, URI_to_match, URI_to_sub, value, path_to_nested_URI+"["+key+"]" )) + return nestedPaths + + try: + sunfish_obj = self.storage_backend.read( object_URI) + for agent_URI, sunfish_URI in agent_aliases.items(): + # find all the references to the aliased agent_URI and replace it + path_to_nested_URI="" + aliasedNestedPaths= findNestedURIs(self, agent_URI, sunfish_URI, sunfish_obj, path_to_nested_URI ) + for path in aliasedNestedPaths: + print(f"---- replaced {agent_URI} with {sunfish_URI} at {path}") + if aliasedNestedPaths: + print(f"---- final updated object") + print(json.dumps(sunfish_obj, indent=2)) + self.storage_backend.replace(sunfish_obj) + sunfish_obj = self.storage_backend.read( object_URI) + print(f"---- done with {object_URI}") + print(f"---- read check {object_URI}") + print(json.dumps(sunfish_obj, indent=2)) + + + + except: + logger.error(f"could not update links in object {object_URI}") - def updateSunfishAliases(self,sunfish_URI, agent_URI, aggregation_source): + def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') if os.path.exists(uri_alias_file): From c5b371a82ae752ff1cb7ec79905f6bd6a350cefd Mon Sep 17 00:00:00 2001 From: rherrell Date: Tue, 29 Oct 2024 14:00:30 -0600 Subject: [PATCH 19/28] added Fabric merge bookkeeping Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 36 ++++++++++++++++--- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 399bd40..960ba5c 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -591,8 +591,10 @@ def createInspectedObject(self,redfish_obj, aggregation_source): # we have a simple name conflict # find new name, build xref redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) - # for now use original naming add_aggregation_source_reference(redfish_obj, aggregation_source) + # here's where we check for a Fabric Object merge + merged_fabrics =RedfishEventHandler.updateIfMergedFabric(self,redfish_obj, existing_obj) + print(f"creating renamed object: {file_path}") logger.info(f"creating renamed object: {file_path}") RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) @@ -738,8 +740,6 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): print(f"---- read check {object_URI}") print(json.dumps(sunfish_obj, indent=2)) - - except: logger.error(f"could not update links in object {object_URI}") @@ -783,6 +783,35 @@ def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): return uri_aliasDB + def updateIfMergedFabric(self,redfish_obj, sunfish_obj): + flag = False + obj_type = redfish_obj["@odata.type"].split('.')[0] + obj_type = obj_type.replace("#","") # #Évent -> Event + print(f"----- found name conflict on fabric {redfish_obj['@odata.id']}") + if obj_type == "Fabric": + print(f"----- object is fabric") + if "UUID" in redfish_obj and "UUID" in sunfish_obj: + if redfish_obj['UUID'] == sunfish_obj['UUID']: + print(f"----- found merge fabric candidate") + flag = True + # (TODO) more checks + # (TODO) update 
both redfish_obj and sunfish_obj with Fabric xref in Sunfish_RM + new_obj_fabric_xref={"@odata.id":sunfish_obj["@odata.id"]} + existing_obj_fabric_xref={"@odata.id":redfish_obj["@odata.id"]} + if "MergedFabrics" in redfish_obj["Oem"]["Sunfish_RM"]: + redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(new_obj_fabric_xref) + else: + redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"] = [] + redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(new_obj_fabric_xref) + print(f"redfish merged fabric object: {json.dumps(redfish_obj,indent=2)}") + + else: + print(f"----- not same fabrics") + + + + return flag + def renameUploadedObject(self,redfish_obj, aggregation_source): # redfish_obj uses agent namespace # aggregation_source is an object in the Sunfish namespace @@ -863,7 +892,6 @@ def add_aggregation_source_reference(redfish_obj, aggregation_source): }, "BoundaryComponent": "unknown" } - print(f"checking Oem field of {json.dumps(redfish_obj, indent=4)}") if "Oem" not in redfish_obj: redfish_obj["Oem"] = {"Sunfish_RM": oem} elif "Sunfish_RM" not in redfish_obj["Oem"]: From 3cbdf0ae6fdc8ff9a074f64bae3dea169f0c40b3 Mon Sep 17 00:00:00 2001 From: rherrell Date: Wed, 30 Oct 2024 12:39:24 -0600 Subject: [PATCH 20/28] correctly writes objects with renamed links back to Sunfish data base Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 43 +++++++++++++------ 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 960ba5c..d9409dc 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -127,14 +127,16 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont resource, 'index.json') if not os.path.exists(fs_full_path): RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) - else: # for now, we will not process the new resource + else: # could be a second agent with naming conflicts logger.error(f"resource to create: {id} already exists.") + # let's run the inspection process on it + RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) # eventually we need to resolve the URI conflict by checking that the # aggregation_source of the existing obj is the same aggregation_source # which just sent this CreateResource event, making this a duplicate attempt. 
# if this is a different aggregation_source, we have a naming conflict # to handle inside the createInspectedObject() routine - raise AlreadyExists(id) + #raise AlreadyExists(id) # patch the aggregation_source object in storage with all the new resources found @@ -592,12 +594,18 @@ def createInspectedObject(self,redfish_obj, aggregation_source): # find new name, build xref redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) add_aggregation_source_reference(redfish_obj, aggregation_source) + # add_aggregation_source_reference will add "Sunfish_RM" dict to "Oem" dict # here's where we check for a Fabric Object merge merged_fabrics =RedfishEventHandler.updateIfMergedFabric(self,redfish_obj, existing_obj) print(f"creating renamed object: {file_path}") logger.info(f"creating renamed object: {file_path}") RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + if merged_fabrics: + # need to update original fabric object with the xref AFTER new obj created + self.storage_backend.replace(existing_obj) + print(f"----- updated (replaced) existing fabric object") + else: # we have a placeholder or boundary link component to process # put in placeholder codes here @@ -715,7 +723,6 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): if key == '@odata.id'and path_to_nested_URI != "": # check @odata.id: value for an alias if value == URI_to_match: - print(f"---- alias found at {path_to_nested_URI}") print(f"---- modifying {value} to {URI_to_sub}") obj[key] = URI_to_sub nestedPaths.append(path_to_nested_URI) @@ -725,20 +732,21 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): try: sunfish_obj = self.storage_backend.read( object_URI) + aliasedNestedPaths=[] + obj_modified = False for agent_URI, sunfish_URI in agent_aliases.items(): # find all the references to the aliased agent_URI and replace it path_to_nested_URI="" aliasedNestedPaths= findNestedURIs(self, agent_URI, sunfish_URI, sunfish_obj, path_to_nested_URI ) + if aliasedNestedPaths: + obj_modified = True for path in aliasedNestedPaths: print(f"---- replaced {agent_URI} with {sunfish_URI} at {path}") - if aliasedNestedPaths: + print(f"---- aliasedNestedPaths is {aliasedNestedPaths}") + if obj_modified: print(f"---- final updated object") print(json.dumps(sunfish_obj, indent=2)) self.storage_backend.replace(sunfish_obj) - sunfish_obj = self.storage_backend.read( object_URI) - print(f"---- done with {object_URI}") - print(f"---- read check {object_URI}") - print(json.dumps(sunfish_obj, indent=2)) except: logger.error(f"could not update links in object {object_URI}") @@ -784,18 +792,18 @@ def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): return uri_aliasDB def updateIfMergedFabric(self,redfish_obj, sunfish_obj): - flag = False + did_a_merge = False obj_type = redfish_obj["@odata.type"].split('.')[0] obj_type = obj_type.replace("#","") # #Évent -> Event - print(f"----- found name conflict on fabric {redfish_obj['@odata.id']}") + print(f"----- potential merged object {redfish_obj['@odata.id']}") if obj_type == "Fabric": print(f"----- object is fabric") if "UUID" in redfish_obj and "UUID" in sunfish_obj: if redfish_obj['UUID'] == sunfish_obj['UUID']: print(f"----- found merge fabric candidate") - flag = True - # (TODO) more checks - # (TODO) update both redfish_obj and sunfish_obj with Fabric xref in Sunfish_RM + did_a_merge = True + # (TODO) more checks ? 
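# --- Illustrative sketch (not part of the applied diff): the cross-reference this block
# builds. Once a merge candidate is detected, each Fabric object carries an
# Oem.Sunfish_RM.MergedFabrics array pointing at its counterpart; the fabric URIs below
# are hypothetical placeholders.
#
#   uploaded (renamed) fabric  "/redfish/v1/Fabrics/<renamed Id>":
#       "Oem": {"Sunfish_RM": {"MergedFabrics": [{"@odata.id": "/redfish/v1/Fabrics/<existing Id>"}]}}
#   existing fabric            "/redfish/v1/Fabrics/<existing Id>":
#       "Oem": {"Sunfish_RM": {"MergedFabrics": [{"@odata.id": "/redfish/v1/Fabrics/<renamed Id>"}]}}
#
# The existing fabric's copy is written back via storage_backend.replace() only after the
# renamed object has been created (see the merged_fabrics check in createInspectedObject).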
+ # update both redfish_obj and sunfish_obj with Fabric xref in Sunfish_RM new_obj_fabric_xref={"@odata.id":sunfish_obj["@odata.id"]} existing_obj_fabric_xref={"@odata.id":redfish_obj["@odata.id"]} if "MergedFabrics" in redfish_obj["Oem"]["Sunfish_RM"]: @@ -805,12 +813,19 @@ def updateIfMergedFabric(self,redfish_obj, sunfish_obj): redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(new_obj_fabric_xref) print(f"redfish merged fabric object: {json.dumps(redfish_obj,indent=2)}") + if "MergedFabrics" in sunfish_obj["Oem"]["Sunfish_RM"]: + sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(existing_obj_fabric_xref) + else: + sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"] = [] + sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(existing_obj_fabric_xref) + print(f"sunfish merged fabric object: {json.dumps(sunfish_obj,indent=2)}") + else: print(f"----- not same fabrics") - return flag + return did_a_merge def renameUploadedObject(self,redfish_obj, aggregation_source): # redfish_obj uses agent namespace From e7468db69133be23670f0616bb3ed4f6306da8ea Mon Sep 17 00:00:00 2001 From: rherrell Date: Fri, 8 Nov 2024 06:49:57 -0700 Subject: [PATCH 21/28] Added the boundaryPort xreference handling Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 192 ++++++++++++++++-- 1 file changed, 172 insertions(+), 20 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index d9409dc..a7adfc3 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -416,6 +416,8 @@ def handleNestedObject(self, obj): # now need to revisit all uploaded objects and update any links renamed after # the uploaded object was written RedfishEventHandler.updateAllAliasedLinks(self,aggregation_source) + # now we need to re-direct any boundary port link references + # RedfishEventHandler.updateAllRedirectLinks(self, aggregation_source) return visited #why not the 'fetched' list? @@ -522,7 +524,13 @@ def fetchResource(self, obj_id, aggregation_source): if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]: aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id']) return redfish_obj - + else: # Agent did not successfully return the obj_id sought + # we still need to check the link for an aliased parent segment + sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, obj_id, aggregation_source) + if obj_id != sunfish_aliased_URI: + RedfishEventHandler.updateSunfishAliasDB(self, sunfish_aliased_URI, obj_id, aggregation_source) + + def createInspectedObject(self,redfish_obj, aggregation_source): if '@odata.id' in redfish_obj: obj_path = os.path.relpath(redfish_obj['@odata.id'], self.conf['redfish_root']) @@ -532,22 +540,6 @@ def createInspectedObject(self,redfish_obj, aggregation_source): file_path = os.path.join(self.conf['redfish_root'], obj_path) logger.debug(f"try creating agent-named object: {file_path}") - ''' - if 'Collection' not in redfish_obj['@odata.type']: - # re-write this to explicitly check for object's existence in Sunfish! - try: - if self.get_object(file_path) == redfish_obj: - pass - elif self.get_object(file_path) != redfish_obj: - warnings.warn('Resource state changed') - except ResourceNotFound: - add_aggregation_source_reference(redfish_obj, aggregation_source) - # do we change the following to a simple FS write? 
- print(f"creating object: {file_path}") - self.create_object(file_path, redfish_obj) - else: - logger.debug("This is a collection") - ''' agent_redfish_URI = redfish_obj['@odata.id'] sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, agent_redfish_URI, aggregation_source) # @odata.id is the Agent-proposed path name, but we need to search for the Sunfish (aliased) name. @@ -598,6 +590,8 @@ def createInspectedObject(self,redfish_obj, aggregation_source): # here's where we check for a Fabric Object merge merged_fabrics =RedfishEventHandler.updateIfMergedFabric(self,redfish_obj, existing_obj) + if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source) print(f"creating renamed object: {file_path}") logger.info(f"creating renamed object: {file_path}") RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) @@ -620,6 +614,8 @@ def createInspectedObject(self,redfish_obj, aggregation_source): else: # assume new object, create it and its parent collection if needed add_aggregation_source_reference(redfish_obj, aggregation_source) print(f"creating object: {file_path}") + if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source) RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) return redfish_obj @@ -691,7 +687,7 @@ def updateAllAliasedLinks(self,aggregation_source): logger.debug(f"updating all objects for : {owning_agent_id}") agent_uploads=[] - # for every aggregation_source: + # for every aggregation_source with aliased links: if owning_agent_id in uri_aliasDB['Agents_xref_URIs']: # grab the k,v aliases structure and the list of URIs for owned objects if 'aliases' in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]: @@ -898,14 +894,170 @@ def renameUploadedObject(self,redfish_obj, aggregation_source): return redfish_obj + + def match_boundary_port(self, searching_agent_id, searching_port_URI, URI_aliasDB): + + matching_port_URIs = [] + searching_for = URI_aliasDB['Agents_xref_URIs'][searching_agent_id]\ + ['boundaryPorts'][searching_port_URI] + + if "RemoteLinkPartnerId" in searching_for: + searching_for_remote_partnerId =searching_for["RemoteLinkPartnerId"] + else: + searching_for_remote_partnerId = 'No remote partnerId' # do NOT use 'None' or "" + if "RemotePortId" in searching_for: + searching_for_remote_portId =searching_for["RemotePortId"] + else: + searching_for_remote_portId = 'No remote portId' # do NOT use 'None' or "" + if "LocalLinkPartnerId" in searching_for: + searching_for_local_partnerId =searching_for["LocalLinkPartnerId"] + else: + searching_for_local_partnerId = 'No local partnerId' # do NOT use 'None' or "" + if "LocalPortId" in searching_for: + searching_for_local_portId =searching_for["LocalPortId"] + else: + searching_for_local_portId = 'No local portId' # do NOT use 'None' or "" + + print(f"----- RemoteLinkPartnerId {searching_for_remote_partnerId}") + print(f"----- RemotePortId {searching_for_remote_portId}") + print(f"----- LocalLinkPartnerId {searching_for_local_partnerId}") + print(f"----- LocalPortId {searching_for_local_portId}") + logger.info(f"searching for match to {searching_port_URI}") + for agent_id, agent_db in URI_aliasDB['Agents_xref_URIs'].items(): + if agent_id != searching_agent_id and 'boundaryPorts' in agent_db: + print(f"----- checking boundaryPorts of {agent_id}") + for port_URI, port_details in 
agent_db['boundaryPorts'].items(): + # always check if the remote port device ID is found first + print(f"----- port_URI {port_URI}") + print(f"----- port_details {port_details}") + if ("LocalLinkPartnerId" in port_details) and \ + (port_details["LocalLinkPartnerId"] == searching_for_remote_partnerId) and \ + ("LocalPortId" in port_details) and \ + (port_details["LocalPortId"] == searching_for_remote_portId): + matching_port_URIs.append(port_URI) + # cross reference BOTH agent's boundaryPorts + print(f"----- found a matching port {port_URI}") + URI_aliasDB['Agents_xref_URIs'][agent_id]['boundaryPorts']\ + [port_URI]['PeerPortURI'] = searching_port_URI + URI_aliasDB['Agents_xref_URIs'][searching_agent_id]['boundaryPorts']\ + [searching_port_URI]['PeerPortURI'] = port_URI + # only check if the local port device ID is being waited on if first check fails + else: + if ("RemoteLinkPartnerId" in port_details) and \ + (port_details["RemoteLinkPartnerId"] == searching_for_local_partnerId) and \ + ("RemotePortId" in port_details) and \ + (port_details["RemotePortId"] == searching_for_local_portId): + matching_port_URIs.append(port_URI) + # cross reference BOTH agent's boundaryPorts + print(f"----- found a matching port {port_URI}") + URI_aliasDB['Agents_xref_URIs'][agent_id]['boundaryPorts']\ + [port_URI]['PeerPortURI'] = searching_port_URI + URI_aliasDB['Agents_xref_URIs'][searching_agent_id]['boundaryPorts']\ + [searching_port_URI]['PeerPortURI'] = port_URI + + + logger.debug(f"matching_ports {matching_port_URIs}") + return matching_port_URIs + + + + def track_boundary_port(self, redfish_obj, aggregation_source): + + agent_alias_dict = { + "aliases":{}, + "boundaryPorts":{} + } + + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + print(json.dumps(uri_aliasDB, indent = 4)) + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + print(f"---- now processing a boundary port") + logger.info(f"---- now processing a boundary port") + obj_type = redfish_obj["@odata.type"].split(".")[0] + obj_type = obj_type.replace("#","") + save_alias_file = False + print(f"---- sunfish URI {redfish_obj['@odata.id']}") + print(f"---- obj type {obj_type}") + port_protocol = redfish_obj["PortProtocol"] + port_type = redfish_obj["PortType"] + port_bc_flag = redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] + print(f"---- port_bc_flag {port_bc_flag}") + if obj_type == "Port" and port_bc_flag == "BoundaryPort": + print(f"---- CXL BoundaryPort") + owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] + localPortURI = redfish_obj['@odata.id'] + if port_protocol=="CXL" and port_type == "InterswitchPort": + print(f"---- CXL InterswitchPort") + print(f"---- owning_agent_id {owning_agent_id}") + print(f"---- localPortURI {localPortURI}") + # create a boundPort entry in uri_aliasDB + if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = agent_alias_dict + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]={} + elif "boundaryPorts" not in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"] = {} + 
uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI] = {} + + # log what the fabric port reports its own PortId and its own LinkPartnerId + if "CXL" in redfish_obj and "LinkPartnerTransmit" in redfish_obj["CXL"]: # rely on 'and' short circuiting + local_link_partner_id = redfish_obj["CXL"]["LinkPartnerTransmit"]["LinkPartnerId"] + local_port_id = redfish_obj["CXL"]["LinkPartnerTransmit"]["PortId"] + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["LocalLinkPartnerId"] = local_link_partner_id + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["LocalPortId"] = local_port_id + + # log if the fabric port reports its received LinkPartnerInfo from other end + if "CXL" in redfish_obj and "LinkPartnerReceive" in redfish_obj["CXL"]: # rely on 'and' short circuiting + remote_link_partner_id = redfish_obj["CXL"]["LinkPartnerReceive"]["LinkPartnerId"] + remote_port_id = redfish_obj["CXL"]["LinkPartnerReceive"]["PortId"] + print(f"---- obj link_partner_id {remote_link_partner_id}") + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["RemoteLinkPartnerId"] =remote_link_partner_id + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ + ["RemotePortId"] = remote_port_id + + # now need to write aliasDB back to file + save_alias_file = True + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + print(json.dumps(uri_aliasDB, indent=2)) + else: + print(f"---- CXL BoundaryPort found, but not on CXL Fabric Link") + pass + matching_ports = RedfishEventHandler.match_boundary_port(self, owning_agent_id, localPortURI, uri_aliasDB) + if matching_ports or save_alias_file: + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + print(json.dumps(uri_aliasDB, indent=2)) + print(f"----- boundary ports matched {matching_ports}") + return + + + def add_aggregation_source_reference(redfish_obj, aggregation_source): - # BoundaryComponent = ["owned", "foreign", "non-boundary","unknown"] + # BoundaryComponent = ["owned", "foreign", "BoundaryLink","unknown"] oem = { "@odata.type": "#SunfishExtensions.v1_0_0.ResourceExtensions", "ManagingAgent": { "@odata.id": aggregation_source["@odata.id"] }, - "BoundaryComponent": "unknown" + "BoundaryComponent": "owned" } if "Oem" not in redfish_obj: redfish_obj["Oem"] = {"Sunfish_RM": oem} From edc58e55ba423972273e7c71a930ff1cddfea1df Mon Sep 17 00:00:00 2001 From: rherrell Date: Sat, 9 Nov 2024 16:15:02 -0700 Subject: [PATCH 22/28] fixed bugs in boundary port search and match Signed-off-by: rherrell --- .../events_handlers/redfish/redfish_event_handler.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index a7adfc3..e857310 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -898,6 +898,7 @@ def renameUploadedObject(self,redfish_obj, aggregation_source): def match_boundary_port(self, searching_agent_id, searching_port_URI, URI_aliasDB): matching_port_URIs = [] + # pull up the link partner dict for this agent.Port searching_for = URI_aliasDB['Agents_xref_URIs'][searching_agent_id]\ ['boundaryPorts'][searching_port_URI] @@ -999,8 
+1000,8 @@ def track_boundary_port(self, redfish_obj, aggregation_source): print(f"---- CXL BoundaryPort") owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] localPortURI = redfish_obj['@odata.id'] - if port_protocol=="CXL" and port_type == "InterswitchPort": - print(f"---- CXL InterswitchPort") + if port_protocol=="CXL" and (port_type == "InterswitchPort" or port_type== "UpstreamPort"): + print(f"---- CXL {port_type}") print(f"---- owning_agent_id {owning_agent_id}") print(f"---- localPortURI {localPortURI}") # create a boundPort entry in uri_aliasDB @@ -1015,6 +1016,8 @@ def track_boundary_port(self, redfish_obj, aggregation_source): if "CXL" in redfish_obj and "LinkPartnerTransmit" in redfish_obj["CXL"]: # rely on 'and' short circuiting local_link_partner_id = redfish_obj["CXL"]["LinkPartnerTransmit"]["LinkPartnerId"] local_port_id = redfish_obj["CXL"]["LinkPartnerTransmit"]["PortId"] + if localPortURI not in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI] = {} uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ ["LocalLinkPartnerId"] = local_link_partner_id uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ @@ -1025,6 +1028,8 @@ def track_boundary_port(self, redfish_obj, aggregation_source): remote_link_partner_id = redfish_obj["CXL"]["LinkPartnerReceive"]["LinkPartnerId"] remote_port_id = redfish_obj["CXL"]["LinkPartnerReceive"]["PortId"] print(f"---- obj link_partner_id {remote_link_partner_id}") + if localPortURI not in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"]: + uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI] = {} uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ ["RemoteLinkPartnerId"] =remote_link_partner_id uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ @@ -1037,7 +1042,7 @@ def track_boundary_port(self, redfish_obj, aggregation_source): data_json.close() print(json.dumps(uri_aliasDB, indent=2)) else: - print(f"---- CXL BoundaryPort found, but not on CXL Fabric Link") + print(f"---- CXL BoundaryPort found, but not on InterswitchPort or UpstreamPort") pass matching_ports = RedfishEventHandler.match_boundary_port(self, owning_agent_id, localPortURI, uri_aliasDB) if matching_ports or save_alias_file: From 25e5bf7be1866bacb122c92971df76f6722e707f Mon Sep 17 00:00:00 2001 From: rherrell Date: Sun, 10 Nov 2024 23:56:50 -0700 Subject: [PATCH 23/28] Boundary Link bookkeeping and updating on upload now working Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 184 +++++++++++++++++- 1 file changed, 181 insertions(+), 3 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index e857310..9eb77b5 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -141,8 +141,6 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont # patch the aggregation_source object in storage with all the new resources found event_handler.core.storage_backend.patch(id, aggregation_source) - # before we are done, we have to process all renamed paths from this aggregation_source. 
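# --- Illustrative note (not part of the applied diff): the renamed-path processing these
# removed comments referred to now happens at the end of the BFS inspection pass (just
# before "return visited"), which as of this patch runs, in order:
#     RedfishEventHandler.updateAllAliasedLinks(self, aggregation_source)  # rewrite links to renamed (aliased) URIs
#     RedfishEventHandler.updateAllAgentsRedirectedLinks(self)             # redirect boundary-port links across all agents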
- # Need to call the updateUploadedObjectPaths() utility return 200 @classmethod @@ -417,7 +415,9 @@ def handleNestedObject(self, obj): # the uploaded object was written RedfishEventHandler.updateAllAliasedLinks(self,aggregation_source) # now we need to re-direct any boundary port link references - # RedfishEventHandler.updateAllRedirectLinks(self, aggregation_source) + # this needs to be done on ALL agents, not just the one we just uploaded + #RedfishEventHandler.updateAllRedirectedLinks(self, aggregation_source) + RedfishEventHandler.updateAllAgentsRedirectedLinks(self) return visited #why not the 'fetched' list? @@ -747,6 +747,184 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): except: logger.error(f"could not update links in object {object_URI}") + def updateAllAgentsRedirectedLinks(self ): + # after renaming all links, need to redirect the placeholder links + # will eventually replace file read & load of aliasDB with aliasDB passed in as arg + try: + uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + print(json.dumps(uri_aliasDB, indent = 4)) + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + modified_aliasDB = False + for owning_agent_id in uri_aliasDB['Agents_xref_URIs']: + logger.debug(f"redirecting placeholder links in all boundary ports for : {owning_agent_id}") + print(f"redirecting placeholder links in all boundary ports for : {owning_agent_id}") + if owning_agent_id in uri_aliasDB['Agents_xref_URIs']: + if 'boundaryPorts' in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]: + for agent_bp_URI in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts']: + agent_bp_obj = self.storage_backend.read(agent_bp_URI) + print(f"------ redirecting links for {agent_bp_URI}") + # check PortType + if "PortType" in agent_bp_obj and agent_bp_obj["PortType"] == "InterswitchPort": + print(f"------ InterswitchPort") + if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: + print(f"------ PeerPortURI found") + RedfishEventHandler.redirectInterswitchLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) + modified_aliasDB = True + # need to replace the update object and re-save the uri_aliasDB + #print(f"------ redirected object is {json.dumps(agent_bp_obj, indent=4)}") + self.storage_backend.replace(agent_bp_obj) + else: + print(f"------ PeerPortURI NOT found") + pass + + elif "PortType" in agent_bp_obj and agent_bp_obj["PortType"] == "UpstreamPort": + print(f"------ UpstreamPort") + if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: + print(f"------ PeerPortURI found") + RedfishEventHandler.redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) + modified_aliasDB = True + # need to replace the update object and re-save the uri_aliasDB + print(f"------ redirected object is {json.dumps(agent_bp_obj, indent=4)}") + self.storage_backend.replace(agent_bp_obj) + else: + print(f"------ PeerPortURI NOT found") + pass + + + + + if modified_aliasDB: + with open(uri_alias_file,'w') as data_json: + json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) + data_json.close() + return + + + def redirectInterswitchLinks(self,owning_agent_id, 
agent_bp_obj,uri_aliasDB): + + + logger.info(f"redirecting Interswitch ConnectedSwitches and ConnectedSwitchPorts") + print(f"------ redirecting Interswitch ConnectedSwitches and ConnectedSwitchPorts") + + agent_bp_URI = agent_bp_obj["@odata.id"] + redirected_CSP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["PeerPortURI"] + switch_uri_segments = redirected_CSP.split("/")[0:-2] + print(f"------ switch_uri_segments {switch_uri_segments}") + redirected_switch_link="" + for i in range(1,len(switch_uri_segments)): + redirected_switch_link = redirected_switch_link +"/" + switch_uri_segments[i] + print(f"------ redirected_switch_link is {redirected_switch_link}") + + if "Links" not in agent_bp_obj: + agent_bp_obj["Links"] = {} + if "ConnectedSwitchPorts" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitchPorts"]=[] + if "ConnectedSwitches" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitches"]=[] + if len(agent_bp_obj["Links"]["ConnectedSwitchPorts"]) >1: + logger.error(f"Interswitch Link claims >1 ConnectedSwitchPorts") + print(f"------ Interswitch Link claims >1 ConnectedSwitchPorts") + else: + if agent_bp_obj["Links"]["ConnectedSwitchPorts"]: + agent_placeholder_CSP = agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] = redirected_CSP + logger.info(f"redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") + print(f"------ redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CSP + else: # no placeholder links in ConnectedSwitchPorts array + agent_bp_obj["Links"]["ConnectedSwitchPorts"].append({"@odata.id":redirected_CSP}) + logger.info(f"created ConnectedSwitchPort to {redirected_CSP}") + print(f"------ created ConnectedSwitchPort to {redirected_CSP}") + + + if len(agent_bp_obj["Links"]["ConnectedSwitches"]) >1: + logger.error(f"Interswitch Link claims >1 ConnectedSwitches") + print(f"------ Interswitch Link claims >1 ConnectedSwitches") + else: + if agent_bp_obj["Links"]["ConnectedSwitches"]: + agent_placeholder_switch_link = agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] = redirected_switch_link + logger.info(f"redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") + print(f"------ redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerSwitchURI"] = agent_placeholder_switch_link + else: # no placeholder links in ConnectedSwitches array + agent_bp_obj["Links"]["ConnectedSwitches"].append({"@odata.id":redirected_switch_link}) + logger.info(f"created ConnectedSwitches to {redirected_switch_link}") + print(f"------ created ConnectedSwitches to {redirected_switch_link}") + + + def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): + + logger.info(f"redirecting UpstreamPort AssociatedEndpoints and ConnectedPorts") + print(f"------ redirecting UpstreamPort AssociatedEndpoints and ConnectedPorts") + + agent_bp_URI = agent_bp_obj["@odata.id"] + redirected_CP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + 
['boundaryPorts'][agent_bp_URI]["PeerPortURI"] + redirected_endpoint = "None" #for now, to test + + if "Links" not in agent_bp_obj: + agent_bp_obj["Links"] = {} + if "ConnectedPorts" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedPorts"]=[] + if "AssociatedEndpoints" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["AssociatedEndpoints"]=[] + + if len(agent_bp_obj["Links"]["ConnectedPorts"]) >1: + logger.error(f"UpstreamPort Link claims >1 ConnectedPorts") + print(f"------ UpstreamPort Link claims >1 ConnectedPorts") + else: + if agent_bp_obj["Links"]["ConnectedPorts"]: + agent_placeholder_CP = agent_bp_obj["Links"]["ConnectedPorts"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedPorts"][0]["@odata.id"] = redirected_CP + logger.info(f"redirected {agent_placeholder_CP} to \n------ {redirected_CP}") + print(f"------ redirected {agent_placeholder_CP} to \n------ {redirected_CP}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CP + else: # no placeholder links in ConnectedSwitchPorts array + agent_bp_obj["Links"]["ConnectedPorts"].append({"@odata.id":redirected_CP}) + logger.info(f"created ConnectedPorts to {redirected_CSP}") + print(f"------ created ConnectedPorts to {redirected_CSP}") + + + if len(agent_bp_obj["Links"]["AssociatedEndpoints"]) >1: + logger.error(f"UpstreamPort Link claims >1 AssociatedEndpoints") + print(f"------ UpstreamPort Link claims >1 AssociatedEndpoints") + else: + if agent_bp_obj["Links"]["AssociatedEndpoints"]: + agent_placeholder_endpoint = agent_bp_obj["Links"]["AssociatedEndpoints"][0]["@odata.id"] + agent_bp_obj["Links"]["AssociatedEndpoints"][0]["@odata.id"] = redirected_endpoint + logger.info(f"redirected {agent_placeholder_endpoint} to \n------ {redirected_endpoint}") + print(f"------ redirected {agent_placeholder_endpoint} to \n------ {redirected_endpoint}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerEndpointURI"] = agent_placeholder_endpoint + else: # no placeholder links in AssociatedEndpoints array + agent_bp_obj["Links"]["AssociatedEndpoints"].append({"@odata.id":redirected_endpoint}) + logger.info(f"created AssociatedEndpoints to {redirected_endpoint}") + print(f"------ created AssociatedEndpoints to {redirected_endpoint}") + + + + def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') From 96fca5581b86b8278fe6810a6e14d1b50ac3af77 Mon Sep 17 00:00:00 2001 From: rherrell Date: Tue, 12 Nov 2024 18:22:01 -0700 Subject: [PATCH 24/28] properly handle redirecting host DSP Endpoints and switch links at boundary ports Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 92 +++++++++++++++++-- 1 file changed, 86 insertions(+), 6 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 9eb77b5..85e5ea4 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -778,6 +778,7 @@ def updateAllAgentsRedirectedLinks(self ): # check PortType if "PortType" in agent_bp_obj and agent_bp_obj["PortType"] == "InterswitchPort": print(f"------ InterswitchPort") + # We are 
assuming if one end of link is ISL, both must be if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: print(f"------ PeerPortURI found") RedfishEventHandler.redirectInterswitchLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) @@ -789,7 +790,7 @@ def updateAllAgentsRedirectedLinks(self ): print(f"------ PeerPortURI NOT found") pass - elif "PortType" in agent_bp_obj and agent_bp_obj["PortType"] == "UpstreamPort": + elif "PortType" in agent_bp_obj and (agent_bp_obj["PortType"] == "UpstreamPort" ): print(f"------ UpstreamPort") if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: print(f"------ PeerPortURI found") @@ -802,6 +803,18 @@ def updateAllAgentsRedirectedLinks(self ): print(f"------ PeerPortURI NOT found") pass + elif "PortType" in agent_bp_obj and (agent_bp_obj["PortType"] == "DownstreamPort" ): + print(f"------ DownstreamPort") + if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: + print(f"------ PeerPortURI found") + RedfishEventHandler.redirectDownstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) + modified_aliasDB = True + # need to replace the update object and re-save the uri_aliasDB + print(f"------ redirected object is {json.dumps(agent_bp_obj, indent=4)}") + self.storage_backend.replace(agent_bp_obj) + else: + print(f"------ PeerPortURI NOT found") + pass @@ -878,7 +891,17 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): agent_bp_URI = agent_bp_obj["@odata.id"] redirected_CP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["PeerPortURI"] - redirected_endpoint = "None" #for now, to test + # find the parent (assumed to be a host) obj of this peer port + host_uri_segments = redirected_CP.split("/")[0:-2] + host_link="" + for i in range(1,len(host_uri_segments)): + host_link = host_link +"/" + host_uri_segments[i] + print(f"------ host_link is {host_link}") + + # extract the Endpoint URI associated with this parent object + host_obj = self.storage_backend.read(host_link) + redirected_endpoint = host_obj["Links"]["Endpoints"][0]["@odata.id"] + #redirected_endpoint = "None" #for now, to test if "Links" not in agent_bp_obj: agent_bp_obj["Links"] = {} @@ -901,8 +924,8 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CP else: # no placeholder links in ConnectedSwitchPorts array agent_bp_obj["Links"]["ConnectedPorts"].append({"@odata.id":redirected_CP}) - logger.info(f"created ConnectedPorts to {redirected_CSP}") - print(f"------ created ConnectedPorts to {redirected_CSP}") + logger.info(f"created ConnectedPorts to {redirected_CP}") + print(f"------ created ConnectedPorts to {redirected_CP}") if len(agent_bp_obj["Links"]["AssociatedEndpoints"]) >1: @@ -922,8 +945,63 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): logger.info(f"created AssociatedEndpoints to {redirected_endpoint}") print(f"------ created AssociatedEndpoints to {redirected_endpoint}") + def redirectDownstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): + logger.info(f"redirecting Downstream ConnectedSwitches and ConnectedSwitchPorts") + print(f"------ redirecting Downstream ConnectedSwitches and ConnectedSwitchPorts") + agent_bp_URI = agent_bp_obj["@odata.id"] + redirected_CSP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + 
['boundaryPorts'][agent_bp_URI]["PeerPortURI"] + switch_uri_segments = redirected_CSP.split("/")[0:-2] + print(f"------ switch_uri_segments {switch_uri_segments}") + redirected_switch_link="" + for i in range(1,len(switch_uri_segments)): + redirected_switch_link = redirected_switch_link +"/" + switch_uri_segments[i] + print(f"------ redirected_switch_link is {redirected_switch_link}") + + if "Links" not in agent_bp_obj: + agent_bp_obj["Links"] = {} + if "ConnectedSwitchPorts" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitchPorts"]=[] + if "ConnectedSwitches" not in agent_bp_obj["Links"]: + agent_bp_obj["Links"]["ConnectedSwitches"]=[] + if len(agent_bp_obj["Links"]["ConnectedSwitchPorts"]) >1: + logger.error(f"Downstream Link claims >1 ConnectedSwitchPorts") + print(f"------ Downstream Link claims >1 ConnectedSwitchPorts") + else: + if agent_bp_obj["Links"]["ConnectedSwitchPorts"]: + agent_placeholder_CSP = agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] = redirected_CSP + logger.info(f"redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") + print(f"------ redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CSP + else: # no placeholder links in ConnectedSwitchPorts array + agent_bp_obj["Links"]["ConnectedSwitchPorts"].append({"@odata.id":redirected_CSP}) + logger.info(f"created ConnectedSwitchPort to {redirected_CSP}") + print(f"------ created ConnectedSwitchPort to {redirected_CSP}") + + + if len(agent_bp_obj["Links"]["ConnectedSwitches"]) >1: + logger.error(f"Downstream Link claims >1 ConnectedSwitches") + print(f"------ Downstream Link claims >1 ConnectedSwitches") + else: + if agent_bp_obj["Links"]["ConnectedSwitches"]: + agent_placeholder_switch_link = agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] + agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] = redirected_switch_link + logger.info(f"redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") + print(f"------ redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") + # save the original agent placeholder in the uri_aliasDB + uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ + ['boundaryPorts'][agent_bp_URI]["AgentPeerSwitchURI"] = agent_placeholder_switch_link + else: # no placeholder links in ConnectedSwitches array + agent_bp_obj["Links"]["ConnectedSwitches"].append({"@odata.id":redirected_switch_link}) + logger.info(f"created ConnectedSwitches to {redirected_switch_link}") + print(f"------ created ConnectedSwitches to {redirected_switch_link}") + + def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): try: @@ -1101,6 +1179,7 @@ def match_boundary_port(self, searching_agent_id, searching_port_URI, URI_aliasD print(f"----- RemotePortId {searching_for_remote_portId}") print(f"----- LocalLinkPartnerId {searching_for_local_partnerId}") print(f"----- LocalPortId {searching_for_local_portId}") + print(f"----- searching for match to {searching_port_URI}") logger.info(f"searching for match to {searching_port_URI}") for agent_id, agent_db in URI_aliasDB['Agents_xref_URIs'].items(): if agent_id != searching_agent_id and 'boundaryPorts' in agent_db: @@ -1178,7 +1257,8 @@ def track_boundary_port(self, redfish_obj, aggregation_source): 
print(f"---- CXL BoundaryPort") owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] localPortURI = redfish_obj['@odata.id'] - if port_protocol=="CXL" and (port_type == "InterswitchPort" or port_type== "UpstreamPort"): + if port_protocol=="CXL" and (port_type == "InterswitchPort" or \ + port_type== "UpstreamPort" or port_type== "DownstreamPort"): print(f"---- CXL {port_type}") print(f"---- owning_agent_id {owning_agent_id}") print(f"---- localPortURI {localPortURI}") @@ -1220,7 +1300,7 @@ def track_boundary_port(self, redfish_obj, aggregation_source): data_json.close() print(json.dumps(uri_aliasDB, indent=2)) else: - print(f"---- CXL BoundaryPort found, but not on InterswitchPort or UpstreamPort") + print(f"---- CXL BoundaryPort found, but not InterswitchPort, UpstreamPort, or DownstreamPort") pass matching_ports = RedfishEventHandler.match_boundary_port(self, owning_agent_id, localPortURI, uri_aliasDB) if matching_ports or save_alias_file: From 845ce42d56f7330dfcb8204347b55389cbc336c9 Mon Sep 17 00:00:00 2001 From: rherrell Date: Thu, 14 Nov 2024 22:32:31 -0700 Subject: [PATCH 25/28] fixed the forward_to_agent bug in core.py, added renaming to/from the agent Signed-off-by: rherrell --- sunfish/lib/core.py | 18 +- .../sunfish_agent/sunfish_agent_manager.py | 187 +++++++++++++++++- 2 files changed, 196 insertions(+), 9 deletions(-) diff --git a/sunfish/lib/core.py b/sunfish/lib/core.py index 74a6606..27d7f79 100644 --- a/sunfish/lib/core.py +++ b/sunfish/lib/core.py @@ -232,6 +232,7 @@ def replace_object(self, path: str, payload: dict): str|exception: return the replaced resource or an exception in case of fault. """ object_type = self._get_type(payload, path=path) + payload_to_write = payload # we assume no changes can be done on collections if "Collection" in object_type: raise CollectionNotSupported() @@ -239,9 +240,12 @@ def replace_object(self, path: str, payload: dict): # 1. check the path target of the operation exists self.storage_backend.read(path) # 2. is needed first forward the request to the agent managing the object - self.objects_manager.forward_to_manager(SunfishRequestType.REPLACE, path, payload=payload) + #self.objects_manager.forward_to_manager(SunfishRequestType.REPLACE, path, payload=payload) + agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.REPLACE, path, payload=payload) + if agent_response: + payload_to_write = agent_response # 3. Execute any custom handler for this object type - self.objects_handler.dispatch(object_type, path, SunfishRequestType.REPLACE, payload=payload) + self.objects_handler.dispatch(object_type, path, SunfishRequestType.REPLACE, payload=payload_to_write) except ResourceNotFound: logger.error(logger.error(f"The resource to be replaced ({path}) does not exist.")) except AttributeError: @@ -249,7 +253,7 @@ def replace_object(self, path: str, payload: dict): logger.debug(f"The object {object_type} does not have a custom handler") pass # 4. persist change in Sunfish tree - return self.storage_backend.replace(payload) + return self.storage_backend.replace(payload_to_write) def patch_object(self, path: str, payload: dict): """Calls the correspondent patch function from the backend implementation. @@ -261,6 +265,7 @@ def patch_object(self, path: str, payload: dict): str|exception: return the updated resource or an exception in case of fault. 
""" # we assume no changes can be done on collections + payload_to_write = payload obj = self.storage_backend.read(path) object_type = self._get_type(obj, path=path) if "Collection" in object_type: @@ -269,7 +274,10 @@ def patch_object(self, path: str, payload: dict): # 1. check the path target of the operation exists self.storage_backend.read(path) # 2. is needed first forward the request to the agent managing the object - self.objects_manager.forward_to_manager(SunfishRequestType.PATCH, path, payload=payload) + #self.objects_manager.forward_to_manager(SunfishRequestType.PATCH, path, payload=payload) + agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.PATCH, path, payload=payload) + if agent_response: + payload_to_write = agent_response # 3. Execute any custom handler for this object type self.objects_handler.dispatch(object_type, path, SunfishRequestType.PATCH, payload=payload) except ResourceNotFound: @@ -280,7 +288,7 @@ def patch_object(self, path: str, payload: dict): pass # 4. persist change in Sunfish tree - return self.storage_backend.patch(path, payload) + return self.storage_backend.patch(path, payload_to_write) def delete_object(self, path: string): """Calls the correspondent remove function from the backend implementation. Checks that the path is valid. diff --git a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py index 3b1d38c..f163c83 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py @@ -5,6 +5,8 @@ import string from typing import Optional import pdb +import os +import json import sunfish.lib.core from sunfish_plugins.objects_managers.sunfish_agent.agents_management import Agent @@ -21,8 +23,13 @@ def __init__(self, core: 'sunfish.lib.core.Core'): self.core = core def forward_to_manager(self, request_type: 'sunfish.models.types.SunfishRequestType', path: string, payload: dict = None) -> Optional[dict]: + uri_aliasDB = {} agent_response = None + object_modified = False path_to_check = path + print(f"!!obj path to foward is {path}") + print(f"!!request_type is {request_type}") + #pdb.set_trace() if request_type == SunfishRequestType.CREATE: # When creating an object, the request must be done on the collection. 
Since collections are generally not # marked with the managing agent we check whether the parent of the collection, that must be a single entity @@ -40,14 +47,15 @@ def forward_to_manager(self, request_type: 'sunfish.models.types.SunfishRequestT path_to_check = "".join(f"/{e}" for e in path_elems) # get the parent path logger.debug(f"Checking managing agent for path: {path_to_check}") - #pdb.set_trace() agent = Agent.is_agent_managed(self.core, path_to_check) + print(f"managing agent is {agent}") if agent: logger.debug(f"{path} is managed by an agent, forwarding the request") - #agent_json = sunfish_core.storage_backend.read(agent) - #agent_uri = agent_json["Hostname"] + obj_modified = self.xlateToAgentURIs(payload) + # extract restored name from payload + restored_path = payload["@odata.id"] try: - agent_response = agent.forward_request(request_type, path, payload=payload) + agent_response = agent.forward_request(request_type, restored_path, payload=payload) except AgentForwardingFailure as e: raise e @@ -75,7 +83,178 @@ def forward_to_manager(self, request_type: 'sunfish.models.types.SunfishRequestT agent_response["Oem"]["Sunfish_RM"]["ManagingAgent"] = { "@odata.id": agent.get_id() } + # anything un-aliased for agent has to be undone + # anything added by agent may need translated + obj_modified = self.xlateToSunfishURIs(agent_response) else: logger.debug(f"{path} is not managed by an agent") + return agent_response + + def xlateToAgentURIs(self, sunfish_obj ): + + def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): + nestedPaths = [] + if type(obj) == list: + i = 0; + for entry in obj: + if type(entry) == list or type(entry) == dict: + nestedPaths.extend( findNestedURIs(self, URI_to_match, URI_to_sub, entry, path_to_nested_URI+"["+str(i)+"]")) + else: + i=i+1 + if type(obj) == dict: + for key,value in obj.items(): + if key == '@odata.id'and path_to_nested_URI != "": + # check @odata.id: value for an alias + if value == URI_to_match: + print(f"---- modifying {value} to {URI_to_sub}") + obj[key] = URI_to_sub + nestedPaths.append(path_to_nested_URI) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + nestedPaths.extend(findNestedURIs(self, URI_to_match, URI_to_sub, value, path_to_nested_URI+"["+key+"]" )) + return nestedPaths + + + try: + uri_alias_file = os.path.join(os.getcwd(), self.core.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + try: + sunfish_aliases = uri_aliasDB["Sunfish_xref_URIs"]["aliases"] + pdb.set_trace() + object_URI = sunfish_obj["@odata.id"] + aliasedNestedPaths=[] + obj_modified = False + # check the obj ID and initial @odata.id + if object_URI in sunfish_aliases: + sunfish_obj["@odata.id"] = sunfish_aliases[object_URI][0] + obj_modified = True + if sunfish_obj["Id"] == object_URI.split("/")[-1]: + sunfish_obj["Id"] = sunfish_aliases[object_URI][0].split("/")[-1] + # now find the nested @odata.id URIs and check them + for sunfish_URI, agent_URI in sunfish_aliases.items(): + # find all the references to the aliased sunfish_URI and replace it + path_to_nested_URI="" + # TODO agent_URI structure is a list, not a simple text string, v hence this index! 
+ aliasedNestedPaths= findNestedURIs(self, sunfish_URI, agent_URI[0], sunfish_obj, path_to_nested_URI ) + if aliasedNestedPaths: + obj_modified = True + for path in aliasedNestedPaths: + print(f"---- replaced {sunfish_URI} with {agent_URI} at {path}") + print(f"---- aliasedNestedPaths is {aliasedNestedPaths}") + if obj_modified: + logger.debug(f"---- object modified") + print(f"---- final updated object") + print(json.dumps(sunfish_obj, indent=2)) + pass + + if "Oem" in sunfish_obj and "Sunfish_RM" in sunfish_obj["Oem"] and \ + "BoundaryComponent" in sunfish_obj["Oem"]["Sunfish_RM"]: + if sunfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + # need to check for boundary port redirected links + # TODO + print(f"------ checking for redirected boundary link") + pass + + except: + logger.error(f"could not update links in object {object_URI}") + + #return sunfish_obj + return obj_modified + + + def xlateToSunfishURIs(self, agent_obj ): + + def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): + nestedPaths = [] + if type(obj) == list: + i = 0; + for entry in obj: + if type(entry) == list or type(entry) == dict: + nestedPaths.extend( findNestedURIs(self, URI_to_match, URI_to_sub, entry, path_to_nested_URI+"["+str(i)+"]")) + else: + i=i+1 + if type(obj) == dict: + for key,value in obj.items(): + if key == '@odata.id'and path_to_nested_URI != "": + # check @odata.id: value for an alias + if value == URI_to_match: + print(f"---- modifying {value} to {URI_to_sub}") + obj[key] = URI_to_sub + nestedPaths.append(path_to_nested_URI) + elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): + nestedPaths.extend(findNestedURIs(self, URI_to_match, URI_to_sub, value, path_to_nested_URI+"["+key+"]" )) + return nestedPaths + + + try: + uri_alias_file = os.path.join(os.getcwd(), self.core.conf["backend_conf"]["fs_private"], 'URI_aliases.json') + if os.path.exists(uri_alias_file): + print(f"reading alias file {uri_alias_file}") + with open(uri_alias_file, 'r') as data_json: + uri_aliasDB = json.load(data_json) + data_json.close() + else: + print(f"alias file {uri_alias_file} not found") + raise Exception + + except: + raise Exception + + + try: + pdb.set_trace() + owning_agent_id = agent_obj["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"].split("/")[-1] + agent_aliases = uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] + object_URI = agent_obj["@odata.id"] + aliasedNestedPaths=[] + obj_modified = False + # check the obj ID and initial @odata.id + if object_URI in agent_aliases: + agent_obj["@odata.id"] = agent_aliases[object_URI] + obj_modified = True + if agent_obj["Id"] == object_URI.split("/")[-1]: + agent_obj["Id"] = agent_aliases[object_URI].split("/")[-1] + # now find the nested @odata.id URIs and check them + for agent_URI,sunfish_URI in agent_aliases.items(): + # find all the references to the aliased sunfish_URI and replace it + path_to_nested_URI="" + # TODO agent_URI structure is a list, not a simple text string, v hence this index! 
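+ # note: unlike the Sunfish_xref_URIs table, the per-agent alias values are plain strings (agent URI -> Sunfish URI), so no list index is needed in this direction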
+ aliasedNestedPaths= findNestedURIs(self, agent_URI, sunfish_URI, agent_obj, path_to_nested_URI ) + if aliasedNestedPaths: + obj_modified = True + for path in aliasedNestedPaths: + print(f"---- replaced {agent_URI } with {sunfish_URI} at {path}") + if obj_modified: + logger.debug(f"---- object modified") + print(f"---- final updated object") + print(json.dumps(agent_obj, indent=2)) + pass + + if "Oem" in agent_obj and "Sunfish_RM" in agent_obj["Oem"] and \ + "BoundaryComponent" in agent_obj["Oem"]["Sunfish_RM"]: + if agent_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": + # need to check for boundary port redirected links + # TODO + print(f"------ checking for redirected boundary link") + pass + + except: + logger.error(f"could not update links in object {object_URI}") + + #return sunfish_obj + return obj_modified + + From 66044f910b143ab372947c4ba85e38e61ff08a71 Mon Sep 17 00:00:00 2001 From: rherrell Date: Mon, 18 Nov 2024 09:33:59 -0700 Subject: [PATCH 26/28] eliminated pdb.settrace() calls for demo Signed-off-by: rherrell --- .../events_handlers/redfish/redfish_event_handler.py | 6 +++--- .../objects_managers/sunfish_agent/sunfish_agent_manager.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 85e5ea4..4ea40b7 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -78,7 +78,7 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event @classmethod def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, context: str): # incoming context (an aggregation_source ID) comes from event sender - pdb.set_trace() + #pdb.set_trace() if context == "": raise PropertyNotFound("Missing agent context in ResourceCreated event") # put the global definition and initial loading of sunfishAliasDB dictionary here @@ -159,7 +159,7 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context hostname = event['MessageArgs'][1] # target address destination = hostname + "/EventListener" # may match a Subscription object's 'Destination' property logger.debug(f"path of file_to_send is {file_to_send}") - pdb.set_trace() + #pdb.set_trace() try: if os.path.exists(file_to_send): with open(file_to_send, 'r') as data_json: @@ -339,7 +339,7 @@ def check_subdirs(self, origin): def find_subscriber_context(self, destination): # look up the subscriber's "Context" for the given event Destination - pdb.set_trace() + #pdb.set_trace() context = "" try: subscribers_list = self.storage_backend.read( diff --git a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py index f163c83..3d86441 100644 --- a/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py +++ b/sunfish_plugins/objects_managers/sunfish_agent/sunfish_agent_manager.py @@ -133,7 +133,7 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): try: sunfish_aliases = uri_aliasDB["Sunfish_xref_URIs"]["aliases"] - pdb.set_trace() + #pdb.set_trace() object_URI = sunfish_obj["@odata.id"] aliasedNestedPaths=[] obj_modified = False @@ -215,7 +215,7 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): try: - pdb.set_trace() + #pdb.set_trace() owning_agent_id = 
agent_obj["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"].split("/")[-1] agent_aliases = uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] object_URI = agent_obj["@odata.id"] From a66ea25e87411b4d61acb4c06e69d121a03f7e25 Mon Sep 17 00:00:00 2001 From: rherrell Date: Mon, 2 Dec 2024 16:05:27 -0700 Subject: [PATCH 27/28] minor debug vs prinf statement changes Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index eb328e8..17e2659 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -112,6 +112,7 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont # here we are assuming that we are getting a fully populated redfish # object from the agent. if "@odata.id" not in response: + # should never hit this! logger.warning(f"Resource {id} did not have @odata.id set when retrieved from Agent. Initializing its value with {id}") response["odata.id"] = id @@ -122,16 +123,10 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont resource, 'index.json') if not os.path.exists(fs_full_path): RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) - else: # could be a second agent with naming conflicts - logger.error(f"resource to create: {id} already exists.") - # let's run the inspection process on it + else: # could be a second agent with naming conflicts, or same agent with duplicate + logger.warning(f"resource to create: {id} already exists.") + # run the inspection process on it to find cause of warning RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) - # eventually we need to resolve the URI conflict by checking that the - # aggregation_source of the existing obj is the same aggregation_source - # which just sent this CreateResource event, making this a duplicate attempt. 
- # if this is a different aggregation_source, we have a naming conflict - # to handle inside the createInspectedObject() routine - #raise AlreadyExists(id) # patch the aggregation_source object in storage with all the new resources found @@ -519,7 +514,8 @@ def fetchResource(self, obj_id, aggregation_source): aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id']) return redfish_obj else: # Agent did not successfully return the obj_id sought - # we still need to check the link for an aliased parent segment + # we still need to check the obj_id for an aliased parent segment + # so we detect renamed navigation links sunfish_aliased_URI = RedfishEventHandler.xlateToSunfishPath(self, obj_id, aggregation_source) if obj_id != sunfish_aliased_URI: RedfishEventHandler.updateSunfishAliasDB(self, sunfish_aliased_URI, obj_id, aggregation_source) @@ -546,6 +542,7 @@ def createInspectedObject(self,redfish_obj, aggregation_source): if redfish_obj['Id'] == agent_redfish_URI.split("/")[-1]: redfish_obj['Id'] = sunfish_aliased_URI.split("/")[-1] print(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") + logger.debug(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") if 'Collection' in redfish_obj['@odata.type']: logger.debug("This is a collection, ignore it until we need it") pass @@ -560,6 +557,7 @@ def createInspectedObject(self,redfish_obj, aggregation_source): existing_obj = self.get_object(file_path) existing_agent_uri = existing_obj["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] print(f"managingAgent of Sunfish {obj_path} is {uploading_agent_uri}") + logger.debug(f"managingAgent of Sunfish {obj_path} is {uploading_agent_uri}") if existing_agent_uri == uploading_agent_uri: # we have a duplicate posting of the object from same agent # check if existing Sunfish object is same as that being fetched from aggregation_source From b3acaec964cfb055d6d97b1c4329592bf4910b45 Mon Sep 17 00:00:00 2001 From: rherrell Date: Fri, 13 Dec 2024 16:39:01 -0700 Subject: [PATCH 28/28] fixed a merge bug, eliminated renaming members of collections, removed most print statements Signed-off-by: rherrell --- .../redfish/redfish_event_handler.py | 319 +++++++----------- 1 file changed, 123 insertions(+), 196 deletions(-) diff --git a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py index 17e2659..3cb258b 100644 --- a/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py +++ b/sunfish_plugins/events_handlers/redfish/redfish_event_handler.py @@ -53,7 +53,7 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event aggregation_source_id = str(uuid.uuid4()) aggregation_source_template = { "@odata.type": "#AggregationSource.v1_2_.AggregationSource", - "@odata.id": f"{event_handler.core.conf['redfish_root']}/AggregationService/AggregationSources/{aggregation_source_id}", + "@odata.id": f"{event_handler.core.conf['redfish_root']}AggregationService/AggregationSources/{aggregation_source_id}", "HostName": hostname, "Id": aggregation_source_id, "Links": { @@ -100,17 +100,18 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont aggregation_source = event_handler.core.storage_backend.read(agg_src_path) else: raise PropertyNotFound("Cannot find aggregation source; file does not exist") + # fetch the actual resource to be created from agent hostname = aggregation_source["HostName"] response = requests.get(f"{hostname}/{id}") if response.status_code != 200: 
raise ResourceNotFound("Aggregation source read from Agent failed") response = response.json() - print(f"new resource is \n") - print(json.dumps(response, indent=4)) + logger.info(f"new resource is \n") + logger.info(json.dumps(response, indent=4)) # here we are assuming that we are getting a fully populated redfish - # object from the agent. + # object from the agent. Add real tests here! if "@odata.id" not in response: # should never hit this! logger.warning(f"Resource {id} did not have @odata.id set when retrieved from Agent. Initializing its value with {id}") @@ -123,9 +124,10 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont resource, 'index.json') if not os.path.exists(fs_full_path): RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) - else: # could be a second agent with naming conflicts, or same agent with duplicate + else: logger.warning(f"resource to create: {id} already exists.") - # run the inspection process on it to find cause of warning + # could be a second agent with naming conflicts, or same agent with duplicate + # still run the inspection process on it to find cause of warning RedfishEventHandler.bfsInspection(event_handler.core, response, aggregation_source) @@ -301,7 +303,6 @@ def forward_event(self, list, payload): path = os.path.join(self.redfish_root, 'EventService', 'Subscriptions', id) try: data = self.core.storage_backend.read(path) - # print('send to: ', data["Id"]) resp = requests.post(data['Destination'], json=payload) resp.raise_for_status() except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e: @@ -366,16 +367,15 @@ def handleNestedObject(self, obj): if type(obj) == dict: for key,value in obj.items(): if key == '@odata.id': - print(f"found nested URL to Redfish obj {value}") RedfishEventHandler.handleEntryIfNotVisited(self, value, visited, queue) elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): handleNestedObject(self, value) # need to ignore Sunfish_RM paths; they are wrong namespace while queue: queue = sorted(queue) - print(f"sorted queue: \n{queue}") id = queue.pop(0) - redfish_obj = RedfishEventHandler.fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched) + redfish_obj = RedfishEventHandler.fetchResourceAndTree(self, id, aggregation_source, \ + visited, queue, fetched) if redfish_obj is None: # we failed to locate it in aggregation_source notfound.append(id) @@ -385,27 +385,20 @@ def handleNestedObject(self, obj): for key, val in redfish_obj.items(): if key == '@odata.id': - #RedfishEventHandler.handleEntryIfNotVisited(self, val, visited, queue) - print(f"ignored top-level @odata.id to Redfish obj {val}") pass - #elif key == 'Links': - # if type(val)==dict or type(val)==list: - # handleNestedObject(self, val) - # # keep extracting nested @odata.id references from the currently fetched object elif type(val) == list or type(val) == dict: handleNestedObject(self, val) - print("\n\nattempted to fetch the following URIs:\n") - print(json.dumps(sorted(fetched),indent = 4)) - print("\n\nAgent did not return objects for the following URIs:\n") - print(json.dumps(sorted(notfound),indent = 4)) + logger.info("\n\nattempted to fetch the following URIs:\n") + logger.info(json.dumps(sorted(fetched),indent = 4)) + logger.info("\n\nAgent did not return objects for the following URIs:\n") + logger.info(json.dumps(sorted(notfound),indent = 4)) # now need to revisit all uploaded objects and update any links renamed after # the 
uploaded object was written RedfishEventHandler.updateAllAliasedLinks(self,aggregation_source) # now we need to re-direct any boundary port link references # this needs to be done on ALL agents, not just the one we just uploaded - #RedfishEventHandler.updateAllRedirectedLinks(self, aggregation_source) RedfishEventHandler.updateAllAgentsRedirectedLinks(self) return visited #why not the 'fetched' list? @@ -477,22 +470,19 @@ def handleEntryIfNotVisited(self,entry, visited, queue): def fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched): # if have no parent dirs path_nodes = id.split("/") need_parent_prefetch = False - print(f"fetchResourceAndTree path_nodes {path_nodes}") for node_position in range(4, len(path_nodes) - 1): redfish_path = f'/redfish/v1/{"/".join(path_nodes[3:node_position + 1])}' logger.info(f"Checking redfish path: {redfish_path}") - print(f"do we need to visit path {redfish_path} ?") if redfish_path not in visited: need_parent_prefetch = True logger.info(f"Inspect redfish path: {redfish_path}") - print(f"yes, adding redfish path to queue: {redfish_path}") queue.append(redfish_path) visited.append(redfish_path) if need_parent_prefetch: # requeue this id and return 'None' queue.append(id) else: # all grand-parent objects have been visited # go get this object from the aggregation_source - print(f"fetchResourceAndTree fetching object {id}") + # fetchResource() will also create the Sunfish copy, if appropriate redfish_obj = RedfishEventHandler.fetchResource(self, id, aggregation_source) fetched.append(id) return redfish_obj @@ -500,13 +490,13 @@ def fetchResourceAndTree(self, id, aggregation_source, visited, queue, fetched): def fetchResource(self, obj_id, aggregation_source): # only called if all grand-parent objects have been put in queue, sorted, inspected, and already fetched. 
# The parent object, if not a collection, will also have already been fetched + # this routine will also call create and/or merge the object into Sunfish database resource_endpoint = aggregation_source["HostName"] + obj_id logger.info(f"fetch: {resource_endpoint}") response = requests.get(resource_endpoint) if response.status_code == 200: # Agent must have returned this object redfish_obj = response.json() - print(f"successfully fetched {obj_id}") # now rename if necessary and copy object into Sunfish inventory redfish_obj = RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source) @@ -541,7 +531,6 @@ def createInspectedObject(self,redfish_obj, aggregation_source): if 'Id' in redfish_obj: if redfish_obj['Id'] == agent_redfish_URI.split("/")[-1]: redfish_obj['Id'] = sunfish_aliased_URI.split("/")[-1] - print(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") logger.debug(f"xlated agent_redfish_URI is {sunfish_aliased_URI}") if 'Collection' in redfish_obj['@odata.type']: logger.debug("This is a collection, ignore it until we need it") @@ -555,59 +544,62 @@ def createInspectedObject(self,redfish_obj, aggregation_source): if os.path.exists(fs_full_path): uploading_agent_uri= aggregation_source["@odata.id"] existing_obj = self.get_object(file_path) + modified_existing_obj = False existing_agent_uri = existing_obj["Oem"]["Sunfish_RM"]["ManagingAgent"]["@odata.id"] - print(f"managingAgent of Sunfish {obj_path} is {uploading_agent_uri}") logger.debug(f"managingAgent of Sunfish {obj_path} is {uploading_agent_uri}") if existing_agent_uri == uploading_agent_uri: - # we have a duplicate posting of the object from same agent - # check if existing Sunfish object is same as that being fetched from aggregation_source - # Need to ignore the Sunfish_RM structure in the compare - # Thus, the following isn't completely correct - # note we don't update the object (for now) - if self.get_object(file_path) == redfish_obj: - # (which shouldn't happen since we are adding in the Sunfish_RM details) - warnings.warn('Duplicate Resource found, ignored') - pass - elif self.get_object(file_path) != redfish_obj: - warnings.warn('Resource state changed') - # put object change checks and updates here + # reject this duplicate posting of the object from same agent + # note we don't update the object + warnings.warn('Duplicate Resource found, ignored') + pass else: - # we may have a naming conflict between agents - if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] != "foreign": - # we have a simple name conflict - # find new name, build xref + # is object a Fabric? + obj_type = redfish_obj["@odata.type"].split('.')[0] + obj_type = obj_type.replace("#","") # #Fabric -> Fabric + + if obj_type == 'Fabric': + # is the conflicting Fabric object the same Fabric Object? 
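+ # assumption: two Fabric objects reporting the same UUID describe the same physical fabric, so keep the existing Sunfish copy and record the uploading agent as a sharer instead of creating a renamed duplicate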
+ if "UUID" in redfish_obj and "UUID" in existing_obj: + if redfish_obj['UUID'] == existing_obj['UUID']: + # assume new Fabric object is the same as existing one + # because aggregation_sources are cooperating + # So, do not post this newly uploaded copy + # However, do update existing object with new 'sharer agent' + modified_existing_obj =RedfishEventHandler.updateIfMergedFabrics(self,redfish_obj, \ + uploading_agent_uri, existing_obj) + if modified_existing_obj: + self.storage_backend.replace(existing_obj) + logger.info(f"----- updated (replaced) existing fabric object") + else: + # different fabrics, just rename the new one + redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) + add_aggregation_source_reference(redfish_obj, aggregation_source) + logger.info(f"creating object: {file_path}") + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + else: + # assume different fabrics, just rename the new one + redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) + add_aggregation_source_reference(redfish_obj, aggregation_source) + logger.info(f"creating object: {file_path}") + RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) + else: + # we have a simple name conflict on a non-Fabric object + # find new name, build xref, check boundary ports and create the new object redfish_obj = RedfishEventHandler.renameUploadedObject(self, redfish_obj, aggregation_source) add_aggregation_source_reference(redfish_obj, aggregation_source) - # add_aggregation_source_reference will add "Sunfish_RM" dict to "Oem" dict - # here's where we check for a Fabric Object merge - merged_fabrics =RedfishEventHandler.updateIfMergedFabric(self,redfish_obj, existing_obj) - + logger.info(f"creating object: {file_path}") if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source) - print(f"creating renamed object: {file_path}") - logger.info(f"creating renamed object: {file_path}") RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) - if merged_fabrics: - # need to update original fabric object with the xref AFTER new obj created - self.storage_backend.replace(existing_obj) - print(f"----- updated (replaced) existing fabric object") - - else: - # we have a placeholder or boundary link component to process - # put in placeholder codes here - print(f"Non-owned component {obj_path} uploaded, ignored") - logger.info(f"Non-owned component {obj_path} uploaded, ignored") - #add_aggregation_source_reference(redfish_obj, aggregation_source) - #print(f"creating renamed object: {file_path}") - #RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) - else: # assume new object, create it and its parent collection if needed add_aggregation_source_reference(redfish_obj, aggregation_source) - print(f"creating object: {file_path}") + logger.info(f"creating object: {file_path}") if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort": RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source) + # is this new object a new fabric object with same fabric UUID as an existing fabric? 
+ # RedfishEventHandler.checkForAliasedFabrics(self, redfish_obj, aggregation_source) RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj) return redfish_obj @@ -619,42 +611,32 @@ def xlateToSunfishPath(self,agent_path, aggregation_source): try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') if os.path.exists(uri_alias_file): - print(f"reading alias file {uri_alias_file}") with open(uri_alias_file, 'r') as data_json: uri_aliasDB = json.load(data_json) data_json.close() - print(json.dumps(uri_aliasDB, indent = 4)) else: - print(f"alias file {uri_alias_file} not found") + logger.error(f"alias file {uri_alias_file} not found") raise Exception except: raise Exception - print(f"xlate {agent_path} to Sunfish path") agentGiven_segments = agent_path.split("/") - print(f"agentGiven tree: {agentGiven_segments}") owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] logger.debug(f"agent id: {owning_agent_id}") # check if owning_agent has any aliases assigned if owning_agent_id in uri_aliasDB["Agents_xref_URIs"]: logger.debug(f"xlating Agent path : {agent_path}") - print(f"xlating Agent path : {agent_path}") agentFinal_obj_path = "" for i in range(1,len(agentGiven_segments)): - print(agentGiven_segments[i]) agentFinal_obj_path=agentFinal_obj_path +"/"+ agentGiven_segments[i] - print(f"agentFinal_obj_path is {agentFinal_obj_path}") # test this path segment - print("test this path segment") - print( agentFinal_obj_path in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"]) if agentFinal_obj_path in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"]: # need to replace agent_path built to this point with sunfish alias - print(f"found an alias for {agentFinal_obj_path}") sunfishAliasPath = uri_aliasDB["Agents_xref_URIs"][owning_agent_id] \ ["aliases"][agentFinal_obj_path] agentFinal_obj_path = sunfishAliasPath - print(f"aliased path is {agentFinal_obj_path}") + logger.debug(f"aliased path is {agentFinal_obj_path}") # next segment agent_path = agentFinal_obj_path return agent_path @@ -666,9 +648,8 @@ def updateAllAliasedLinks(self,aggregation_source): with open(uri_alias_file, 'r') as data_json: uri_aliasDB = json.load(data_json) data_json.close() - print(json.dumps(uri_aliasDB, indent = 4)) else: - print(f"alias file {uri_alias_file} not found") + logger.error(f"alias file {uri_alias_file} not found") raise Exception except: @@ -688,8 +669,7 @@ def updateAllAliasedLinks(self,aggregation_source): # update all the objects for upload_obj_URI in agent_uploads: - logger.info(f"updating links in obj: {upload_obj_URI}") - print(f"updating links in obj: {upload_obj_URI}") + logger.debug(f"updating links in obj: {upload_obj_URI}") RedfishEventHandler.updateObjectAliasedLinks(self, upload_obj_URI, agent_aliases) return @@ -711,7 +691,7 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): if key == '@odata.id'and path_to_nested_URI != "": # check @odata.id: value for an alias if value == URI_to_match: - print(f"---- modifying {value} to {URI_to_sub}") + logger.info(f"modifying {value} to {URI_to_sub}") obj[key] = URI_to_sub nestedPaths.append(path_to_nested_URI) elif key != "Sunfish_RM" and (type(value) == list or type(value) == dict): @@ -720,21 +700,23 @@ def findNestedURIs(self, URI_to_match, URI_to_sub, obj, path_to_nested_URI): try: sunfish_obj = self.storage_backend.read( object_URI) - aliasedNestedPaths=[] - obj_modified = False - for agent_URI, sunfish_URI in 
agent_aliases.items(): - # find all the references to the aliased agent_URI and replace it - path_to_nested_URI="" - aliasedNestedPaths= findNestedURIs(self, agent_URI, sunfish_URI, sunfish_obj, path_to_nested_URI ) - if aliasedNestedPaths: - obj_modified = True - for path in aliasedNestedPaths: - print(f"---- replaced {agent_URI} with {sunfish_URI} at {path}") - print(f"---- aliasedNestedPaths is {aliasedNestedPaths}") - if obj_modified: - print(f"---- final updated object") - print(json.dumps(sunfish_obj, indent=2)) - self.storage_backend.replace(sunfish_obj) + obj_type = redfish_obj["@odata.type"].split('.')[0] + obj_type = obj_type.split("/")[-1] + obj_type = obj_type.replace("#","") # #Évent -> Event + # should not do aliasing on the members of a Collection + # since the members list should contain both original and aliased URIs + if "Collection" not in obj_type : + aliasedNestedPaths=[] + obj_modified = False + for agent_URI, sunfish_URI in agent_aliases.items(): + # find all the references to the aliased agent_URI and replace it + path_to_nested_URI="" + aliasedNestedPaths= findNestedURIs(self, agent_URI, sunfish_URI, sunfish_obj, path_to_nested_URI ) + if aliasedNestedPaths: + obj_modified = True + if obj_modified: + logger.info(json.dumps(sunfish_obj, indent=2)) + self.storage_backend.replace(sunfish_obj) except: logger.error(f"could not update links in object {object_URI}") @@ -745,13 +727,11 @@ def updateAllAgentsRedirectedLinks(self ): try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') if os.path.exists(uri_alias_file): - print(f"reading alias file {uri_alias_file}") with open(uri_alias_file, 'r') as data_json: uri_aliasDB = json.load(data_json) data_json.close() - print(json.dumps(uri_aliasDB, indent = 4)) else: - print(f"alias file {uri_alias_file} not found") + logger.error(f"alias file {uri_alias_file} not found") raise Exception except: @@ -761,51 +741,41 @@ def updateAllAgentsRedirectedLinks(self ): modified_aliasDB = False for owning_agent_id in uri_aliasDB['Agents_xref_URIs']: logger.debug(f"redirecting placeholder links in all boundary ports for : {owning_agent_id}") - print(f"redirecting placeholder links in all boundary ports for : {owning_agent_id}") if owning_agent_id in uri_aliasDB['Agents_xref_URIs']: if 'boundaryPorts' in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]: for agent_bp_URI in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts']: agent_bp_obj = self.storage_backend.read(agent_bp_URI) - print(f"------ redirecting links for {agent_bp_URI}") + logger.debug(f"------ redirecting links for {agent_bp_URI}") # check PortType if "PortType" in agent_bp_obj and agent_bp_obj["PortType"] == "InterswitchPort": - print(f"------ InterswitchPort") # We are assuming if one end of link is ISL, both must be if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: - print(f"------ PeerPortURI found") RedfishEventHandler.redirectInterswitchLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) modified_aliasDB = True # need to replace the update object and re-save the uri_aliasDB - #print(f"------ redirected object is {json.dumps(agent_bp_obj, indent=4)}") self.storage_backend.replace(agent_bp_obj) else: - print(f"------ PeerPortURI NOT found") + logger.info(f"------ PeerPortURI NOT found") pass elif "PortType" in agent_bp_obj and (agent_bp_obj["PortType"] == "UpstreamPort" ): - print(f"------ UpstreamPort") if "PeerPortURI" in 
uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: - print(f"------ PeerPortURI found") RedfishEventHandler.redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) modified_aliasDB = True # need to replace the update object and re-save the uri_aliasDB - print(f"------ redirected object is {json.dumps(agent_bp_obj, indent=4)}") self.storage_backend.replace(agent_bp_obj) else: - print(f"------ PeerPortURI NOT found") + logger.info(f"------ PeerPortURI NOT found") pass elif "PortType" in agent_bp_obj and (agent_bp_obj["PortType"] == "DownstreamPort" ): - print(f"------ DownstreamPort") if "PeerPortURI" in uri_aliasDB['Agents_xref_URIs'][owning_agent_id]['boundaryPorts'][agent_bp_URI]: - print(f"------ PeerPortURI found") RedfishEventHandler.redirectDownstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB) modified_aliasDB = True # need to replace the update object and re-save the uri_aliasDB - print(f"------ redirected object is {json.dumps(agent_bp_obj, indent=4)}") self.storage_backend.replace(agent_bp_obj) else: - print(f"------ PeerPortURI NOT found") + logger.info(f"------ PeerPortURI NOT found") pass @@ -821,17 +791,15 @@ def redirectInterswitchLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): logger.info(f"redirecting Interswitch ConnectedSwitches and ConnectedSwitchPorts") - print(f"------ redirecting Interswitch ConnectedSwitches and ConnectedSwitchPorts") agent_bp_URI = agent_bp_obj["@odata.id"] redirected_CSP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["PeerPortURI"] switch_uri_segments = redirected_CSP.split("/")[0:-2] - print(f"------ switch_uri_segments {switch_uri_segments}") redirected_switch_link="" for i in range(1,len(switch_uri_segments)): redirected_switch_link = redirected_switch_link +"/" + switch_uri_segments[i] - print(f"------ redirected_switch_link is {redirected_switch_link}") + logger.debug(f"------ redirected_switch_link is {redirected_switch_link}") if "Links" not in agent_bp_obj: agent_bp_obj["Links"] = {} @@ -841,44 +809,37 @@ def redirectInterswitchLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): agent_bp_obj["Links"]["ConnectedSwitches"]=[] if len(agent_bp_obj["Links"]["ConnectedSwitchPorts"]) >1: logger.error(f"Interswitch Link claims >1 ConnectedSwitchPorts") - print(f"------ Interswitch Link claims >1 ConnectedSwitchPorts") else: if agent_bp_obj["Links"]["ConnectedSwitchPorts"]: agent_placeholder_CSP = agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] = redirected_CSP logger.info(f"redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") - print(f"------ redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") # save the original agent placeholder in the uri_aliasDB uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CSP else: # no placeholder links in ConnectedSwitchPorts array agent_bp_obj["Links"]["ConnectedSwitchPorts"].append({"@odata.id":redirected_CSP}) logger.info(f"created ConnectedSwitchPort to {redirected_CSP}") - print(f"------ created ConnectedSwitchPort to {redirected_CSP}") if len(agent_bp_obj["Links"]["ConnectedSwitches"]) >1: logger.error(f"Interswitch Link claims >1 ConnectedSwitches") - print(f"------ Interswitch Link claims >1 ConnectedSwitches") else: if agent_bp_obj["Links"]["ConnectedSwitches"]: agent_placeholder_switch_link = 
agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] = redirected_switch_link logger.info(f"redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") - print(f"------ redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") # save the original agent placeholder in the uri_aliasDB uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["AgentPeerSwitchURI"] = agent_placeholder_switch_link else: # no placeholder links in ConnectedSwitches array agent_bp_obj["Links"]["ConnectedSwitches"].append({"@odata.id":redirected_switch_link}) logger.info(f"created ConnectedSwitches to {redirected_switch_link}") - print(f"------ created ConnectedSwitches to {redirected_switch_link}") def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): logger.info(f"redirecting UpstreamPort AssociatedEndpoints and ConnectedPorts") - print(f"------ redirecting UpstreamPort AssociatedEndpoints and ConnectedPorts") agent_bp_URI = agent_bp_obj["@odata.id"] redirected_CP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ @@ -888,7 +849,7 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): host_link="" for i in range(1,len(host_uri_segments)): host_link = host_link +"/" + host_uri_segments[i] - print(f"------ host_link is {host_link}") + logger.debug(f"host_link is {host_link}") # extract the Endpoint URI associated with this parent object host_obj = self.storage_backend.read(host_link) @@ -904,53 +865,45 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): if len(agent_bp_obj["Links"]["ConnectedPorts"]) >1: logger.error(f"UpstreamPort Link claims >1 ConnectedPorts") - print(f"------ UpstreamPort Link claims >1 ConnectedPorts") else: if agent_bp_obj["Links"]["ConnectedPorts"]: agent_placeholder_CP = agent_bp_obj["Links"]["ConnectedPorts"][0]["@odata.id"] agent_bp_obj["Links"]["ConnectedPorts"][0]["@odata.id"] = redirected_CP logger.info(f"redirected {agent_placeholder_CP} to \n------ {redirected_CP}") - print(f"------ redirected {agent_placeholder_CP} to \n------ {redirected_CP}") # save the original agent placeholder in the uri_aliasDB uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CP else: # no placeholder links in ConnectedSwitchPorts array agent_bp_obj["Links"]["ConnectedPorts"].append({"@odata.id":redirected_CP}) logger.info(f"created ConnectedPorts to {redirected_CP}") - print(f"------ created ConnectedPorts to {redirected_CP}") if len(agent_bp_obj["Links"]["AssociatedEndpoints"]) >1: logger.error(f"UpstreamPort Link claims >1 AssociatedEndpoints") - print(f"------ UpstreamPort Link claims >1 AssociatedEndpoints") else: if agent_bp_obj["Links"]["AssociatedEndpoints"]: agent_placeholder_endpoint = agent_bp_obj["Links"]["AssociatedEndpoints"][0]["@odata.id"] agent_bp_obj["Links"]["AssociatedEndpoints"][0]["@odata.id"] = redirected_endpoint logger.info(f"redirected {agent_placeholder_endpoint} to \n------ {redirected_endpoint}") - print(f"------ redirected {agent_placeholder_endpoint} to \n------ {redirected_endpoint}") # save the original agent placeholder in the uri_aliasDB uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["AgentPeerEndpointURI"] = agent_placeholder_endpoint else: # no placeholder links in AssociatedEndpoints array 
agent_bp_obj["Links"]["AssociatedEndpoints"].append({"@odata.id":redirected_endpoint}) logger.info(f"created AssociatedEndpoints to {redirected_endpoint}") - print(f"------ created AssociatedEndpoints to {redirected_endpoint}") def redirectDownstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): logger.info(f"redirecting Downstream ConnectedSwitches and ConnectedSwitchPorts") - print(f"------ redirecting Downstream ConnectedSwitches and ConnectedSwitchPorts") agent_bp_URI = agent_bp_obj["@odata.id"] redirected_CSP = uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["PeerPortURI"] switch_uri_segments = redirected_CSP.split("/")[0:-2] - print(f"------ switch_uri_segments {switch_uri_segments}") redirected_switch_link="" for i in range(1,len(switch_uri_segments)): redirected_switch_link = redirected_switch_link +"/" + switch_uri_segments[i] - print(f"------ redirected_switch_link is {redirected_switch_link}") + logger.info(f"------ redirected_switch_link is {redirected_switch_link}") if "Links" not in agent_bp_obj: agent_bp_obj["Links"] = {} @@ -960,38 +913,32 @@ def redirectDownstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB): agent_bp_obj["Links"]["ConnectedSwitches"]=[] if len(agent_bp_obj["Links"]["ConnectedSwitchPorts"]) >1: logger.error(f"Downstream Link claims >1 ConnectedSwitchPorts") - print(f"------ Downstream Link claims >1 ConnectedSwitchPorts") else: if agent_bp_obj["Links"]["ConnectedSwitchPorts"]: agent_placeholder_CSP = agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] agent_bp_obj["Links"]["ConnectedSwitchPorts"][0]["@odata.id"] = redirected_CSP logger.info(f"redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") - print(f"------ redirected {agent_placeholder_CSP} to \n------ {redirected_CSP}") # save the original agent placeholder in the uri_aliasDB uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["AgentPeerPortURI"] = agent_placeholder_CSP else: # no placeholder links in ConnectedSwitchPorts array agent_bp_obj["Links"]["ConnectedSwitchPorts"].append({"@odata.id":redirected_CSP}) logger.info(f"created ConnectedSwitchPort to {redirected_CSP}") - print(f"------ created ConnectedSwitchPort to {redirected_CSP}") if len(agent_bp_obj["Links"]["ConnectedSwitches"]) >1: logger.error(f"Downstream Link claims >1 ConnectedSwitches") - print(f"------ Downstream Link claims >1 ConnectedSwitches") else: if agent_bp_obj["Links"]["ConnectedSwitches"]: agent_placeholder_switch_link = agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] agent_bp_obj["Links"]["ConnectedSwitches"][0]["@odata.id"] = redirected_switch_link logger.info(f"redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") - print(f"------ redirected {agent_placeholder_switch_link} to \n------ {redirected_switch_link}") # save the original agent placeholder in the uri_aliasDB uri_aliasDB['Agents_xref_URIs'][owning_agent_id]\ ['boundaryPorts'][agent_bp_URI]["AgentPeerSwitchURI"] = agent_placeholder_switch_link else: # no placeholder links in ConnectedSwitches array agent_bp_obj["Links"]["ConnectedSwitches"].append({"@odata.id":redirected_switch_link}) logger.info(f"created ConnectedSwitches to {redirected_switch_link}") - print(f"------ created ConnectedSwitches to {redirected_switch_link}") @@ -999,13 +946,11 @@ def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 
'URI_aliases.json') if os.path.exists(uri_alias_file): - print(f"reading alias file {uri_alias_file}") with open(uri_alias_file, 'r') as data_json: uri_aliasDB = json.load(data_json) data_json.close() - print(json.dumps(uri_aliasDB, indent = 4)) else: - print(f"alias file {uri_alias_file} not found") + logger.error(f"alias file {uri_alias_file} not found") raise Exception except: @@ -1017,10 +962,8 @@ def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = {} uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = {} uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agent_URI]=sunfish_URI - print(json.dumps(uri_aliasDB, indent=2)) else: uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agent_URI]=sunfish_URI - print(json.dumps(uri_aliasDB, indent=2)) if sunfish_URI not in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfish_URI] = [] @@ -1035,18 +978,35 @@ def updateSunfishAliasDB(self,sunfish_URI, agent_URI, aggregation_source): return uri_aliasDB - def updateIfMergedFabric(self,redfish_obj, sunfish_obj): - did_a_merge = False + def updateIfMergedFabrics(self,redfish_obj, uploading_agent_uri, sunfish_obj ): + # both objects must be Fabric objects + # both objects must have Sunfish_RM property + logger.info(f"----- merged fabric processed") + did_a_merge = True + # update sunfish_obj with agent_uri of redfish_obj as a sharer + new_obj_owner={"@odata.id":uploading_agent_uri} + + if "FabricSharedWith" in sunfish_obj["Oem"]["Sunfish_RM"]: + sunfish_obj["Oem"]["Sunfish_RM"]["FabricSharedWith"].append(new_obj_owner) + else: + sunfish_obj["Oem"]["Sunfish_RM"]["FabricSharedWith"] = [] + sunfish_obj["Oem"]["Sunfish_RM"]["FabricSharedWith"].append(new_obj_owner) + logger.debug(f"sunfish merged fabric object: {json.dumps(sunfish_obj,indent=2)}") + + return did_a_merge + + def checkForAliasedFabrics(self, redfish_obj, aggregation_source): + found_an_aliased_fabric = False obj_type = redfish_obj["@odata.type"].split('.')[0] obj_type = obj_type.replace("#","") # #Évent -> Event - print(f"----- potential merged object {redfish_obj['@odata.id']}") if obj_type == "Fabric": - print(f"----- object is fabric") + # TODO: + # check all existing Fabrics + # look for Fabric UUID in existing Fabrics + # compare UUIDs if "UUID" in redfish_obj and "UUID" in sunfish_obj: if redfish_obj['UUID'] == sunfish_obj['UUID']: - print(f"----- found merge fabric candidate") did_a_merge = True - # (TODO) more checks ? 
# update both redfish_obj and sunfish_obj with Fabric xref in Sunfish_RM new_obj_fabric_xref={"@odata.id":sunfish_obj["@odata.id"]} existing_obj_fabric_xref={"@odata.id":redfish_obj["@odata.id"]} @@ -1055,21 +1015,18 @@ def updateIfMergedFabric(self,redfish_obj, sunfish_obj): else: redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"] = [] redfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(new_obj_fabric_xref) - print(f"redfish merged fabric object: {json.dumps(redfish_obj,indent=2)}") if "MergedFabrics" in sunfish_obj["Oem"]["Sunfish_RM"]: sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(existing_obj_fabric_xref) else: sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"] = [] sunfish_obj["Oem"]["Sunfish_RM"]["MergedFabrics"].append(existing_obj_fabric_xref) - print(f"sunfish merged fabric object: {json.dumps(sunfish_obj,indent=2)}") + logger.debug(f"sunfish merged fabric object: {json.dumps(sunfish_obj,indent=2)}") else: - print(f"----- not same fabrics") - - + logger.debug(f"----- not same fabrics") - return did_a_merge + return found_an_aliased_fabric def renameUploadedObject(self,redfish_obj, aggregation_source): # redfish_obj uses agent namespace @@ -1078,25 +1035,19 @@ def renameUploadedObject(self,redfish_obj, aggregation_source): try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') if os.path.exists(uri_alias_file): - print(f"reading alias file {uri_alias_file}") with open(uri_alias_file, 'r') as data_json: uri_aliasDB = json.load(data_json) data_json.close() - print(json.dumps(uri_aliasDB, indent = 4)) else: - print(f"alias file {uri_alias_file} not found") + logger.error(f"alias file {uri_alias_file} not found") raise Exception except: raise Exception - print(json.dumps(redfish_obj, indent=2)) agentGiven_obj_path = redfish_obj['@odata.id'] agentGiven_segments = agentGiven_obj_path.split("/") agentGiven_obj_name = agentGiven_segments[-1] - #agentGiven_tree_segments = os.path.relpath(redfish_obj['@odata.id'], self.conf['redfish_root']).split("/") - print(f"agentGiven tree: {agentGiven_segments}") - #agent_file_path = os.path.join(self.conf['redfish_root'], agent_obj_path, 'index.json') owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] # generate a new path and object name logger.debug(f"renaming object: {agentGiven_obj_path}") @@ -1104,7 +1055,6 @@ def renameUploadedObject(self,redfish_obj, aggregation_source): sunfishGiven_obj_name = "Sunfish_"+owning_agent_id[:4]+"_"+agentGiven_obj_name sunfishGiven_obj_path = "/" for i in range(1,len(agentGiven_segments)-1): - print(agentGiven_segments[i]) sunfishGiven_obj_path=sunfishGiven_obj_path + agentGiven_segments[i]+"/" sunfishGiven_obj_path=sunfishGiven_obj_path + sunfishGiven_obj_name # need to check new name is also unused @@ -1114,20 +1064,17 @@ def renameUploadedObject(self,redfish_obj, aggregation_source): sunfishGiven_obj_path=sunfishGiven_obj_path.replace(sunfishGiven_obj_name,temp_string) # - print(sunfishGiven_obj_path) + logger.debug(sunfishGiven_obj_path) redfish_obj['@odata.id'] = sunfishGiven_obj_path if redfish_obj['Id'] == agentGiven_obj_name: redfish_obj['Id'] = sunfishGiven_obj_name - print(json.dumps(redfish_obj, indent=2)) # now need to update aliasDB if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = {} uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"] = {} uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agentGiven_obj_path]=sunfishGiven_obj_path - 
print(json.dumps(uri_aliasDB, indent=2)) else: uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["aliases"][agentGiven_obj_path]=sunfishGiven_obj_path - print(json.dumps(uri_aliasDB, indent=2)) if sunfishGiven_obj_path not in uri_aliasDB["Sunfish_xref_URIs"]["aliases"]: uri_aliasDB["Sunfish_xref_URIs"]["aliases"][sunfishGiven_obj_path] = [] @@ -1167,26 +1114,18 @@ def match_boundary_port(self, searching_agent_id, searching_port_URI, URI_aliasD else: searching_for_local_portId = 'No local portId' # do NOT use 'None' or "" - print(f"----- RemoteLinkPartnerId {searching_for_remote_partnerId}") - print(f"----- RemotePortId {searching_for_remote_portId}") - print(f"----- LocalLinkPartnerId {searching_for_local_partnerId}") - print(f"----- LocalPortId {searching_for_local_portId}") - print(f"----- searching for match to {searching_port_URI}") logger.info(f"searching for match to {searching_port_URI}") for agent_id, agent_db in URI_aliasDB['Agents_xref_URIs'].items(): if agent_id != searching_agent_id and 'boundaryPorts' in agent_db: - print(f"----- checking boundaryPorts of {agent_id}") for port_URI, port_details in agent_db['boundaryPorts'].items(): # always check if the remote port device ID is found first - print(f"----- port_URI {port_URI}") - print(f"----- port_details {port_details}") if ("LocalLinkPartnerId" in port_details) and \ (port_details["LocalLinkPartnerId"] == searching_for_remote_partnerId) and \ ("LocalPortId" in port_details) and \ (port_details["LocalPortId"] == searching_for_remote_portId): matching_port_URIs.append(port_URI) - # cross reference BOTH agent's boundaryPorts - print(f"----- found a matching port {port_URI}") + # cross reference BOTH agents' boundaryPorts + logger.info(f"----- found a matching port {port_URI}") URI_aliasDB['Agents_xref_URIs'][agent_id]['boundaryPorts']\ [port_URI]['PeerPortURI'] = searching_port_URI URI_aliasDB['Agents_xref_URIs'][searching_agent_id]['boundaryPorts']\ @@ -1199,7 +1138,7 @@ def match_boundary_port(self, searching_agent_id, searching_port_URI, URI_aliasD (port_details["RemotePortId"] == searching_for_local_portId): matching_port_URIs.append(port_URI) # cross reference BOTH agent's boundaryPorts - print(f"----- found a matching port {port_URI}") + logger.info(f"----- found a matching port {port_URI}") URI_aliasDB['Agents_xref_URIs'][agent_id]['boundaryPorts']\ [port_URI]['PeerPortURI'] = searching_port_URI URI_aliasDB['Agents_xref_URIs'][searching_agent_id]['boundaryPorts']\ @@ -1221,39 +1160,29 @@ def track_boundary_port(self, redfish_obj, aggregation_source): try: uri_alias_file = os.path.join(os.getcwd(), self.conf["backend_conf"]["fs_private"], 'URI_aliases.json') if os.path.exists(uri_alias_file): - print(f"reading alias file {uri_alias_file}") with open(uri_alias_file, 'r') as data_json: uri_aliasDB = json.load(data_json) data_json.close() - print(json.dumps(uri_aliasDB, indent = 4)) else: - print(f"alias file {uri_alias_file} not found") + logger.error(f"alias file {uri_alias_file} not found") raise Exception except: raise Exception - print(f"---- now processing a boundary port") logger.info(f"---- now processing a boundary port") obj_type = redfish_obj["@odata.type"].split(".")[0] obj_type = obj_type.replace("#","") save_alias_file = False - print(f"---- sunfish URI {redfish_obj['@odata.id']}") - print(f"---- obj type {obj_type}") port_protocol = redfish_obj["PortProtocol"] port_type = redfish_obj["PortType"] port_bc_flag = redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] - print(f"---- port_bc_flag {port_bc_flag}") if 
obj_type == "Port" and port_bc_flag == "BoundaryPort": - print(f"---- CXL BoundaryPort") owning_agent_id = aggregation_source["@odata.id"].split("/")[-1] localPortURI = redfish_obj['@odata.id'] if port_protocol=="CXL" and (port_type == "InterswitchPort" or \ port_type== "UpstreamPort" or port_type== "DownstreamPort"): - print(f"---- CXL {port_type}") - print(f"---- owning_agent_id {owning_agent_id}") - print(f"---- localPortURI {localPortURI}") # create a boundPort entry in uri_aliasDB if owning_agent_id not in uri_aliasDB["Agents_xref_URIs"]: uri_aliasDB["Agents_xref_URIs"][owning_agent_id] = agent_alias_dict @@ -1277,7 +1206,7 @@ def track_boundary_port(self, redfish_obj, aggregation_source): if "CXL" in redfish_obj and "LinkPartnerReceive" in redfish_obj["CXL"]: # rely on 'and' short circuiting remote_link_partner_id = redfish_obj["CXL"]["LinkPartnerReceive"]["LinkPartnerId"] remote_port_id = redfish_obj["CXL"]["LinkPartnerReceive"]["PortId"] - print(f"---- obj link_partner_id {remote_link_partner_id}") + logger.debug(f"---- obj link_partner_id {remote_link_partner_id}") if localPortURI not in uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"]: uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI] = {} uri_aliasDB["Agents_xref_URIs"][owning_agent_id]["boundaryPorts"][localPortURI]\ @@ -1290,17 +1219,15 @@ def track_boundary_port(self, redfish_obj, aggregation_source): with open(uri_alias_file,'w') as data_json: json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) data_json.close() - print(json.dumps(uri_aliasDB, indent=2)) else: - print(f"---- CXL BoundaryPort found, but not InterswitchPort, UpstreamPort, or DownstreamPort") + logger.debug(f"---- CXL BoundaryPort found, but not InterswitchPort, UpstreamPort, or DownstreamPort") pass matching_ports = RedfishEventHandler.match_boundary_port(self, owning_agent_id, localPortURI, uri_aliasDB) if matching_ports or save_alias_file: with open(uri_alias_file,'w') as data_json: json.dump(uri_aliasDB, data_json, indent=4, sort_keys=True) data_json.close() - print(json.dumps(uri_aliasDB, indent=2)) - print(f"----- boundary ports matched {matching_ports}") + logger.debug(f"----- boundary ports matched {matching_ports}") return