53 changes: 20 additions & 33 deletions sunfish_plugins/events_handlers/redfish/redfish_event_handler.py
@@ -46,8 +46,6 @@ def AggregationSourceDiscovered(cls, event_handler: EventHandlerInterface, event
response = response.json()

### Save agent registration
# connection_method_name = connectionMethodId.split('/')[-1]
# connection_method_name = connectionMethodId[:-len(connection_method_name)]
event_handler.core.storage_backend.write(response)

aggregation_source_id = str(uuid.uuid4())
@@ -85,8 +83,6 @@ def ResourceCreated(cls, event_handler: EventHandlerInterface, event: dict, cont
# sunfishAliasDB contains renaming data, the alias xref array, the boundaryLink
# data, and assorted flags that are used during upload renaming and final merge of
# boundary components based on boundary links.

#
#

logger.info("New resource created")
@@ -149,7 +145,6 @@ def TriggerEvent(cls, event_handler: EventHandlerInterface, event: dict, context
#
logger.info("TriggerEvent method called")
file_to_send = event['MessageArgs'][0] # relative Resource Path
#file_path = os.path.join(self.conf['redfish_root'], file_to_send)
hostname = event['MessageArgs'][1] # target address
destination = hostname + "/EventListener" # may match a Subscription object's 'Destination' property
logger.debug(f"path of file_to_send is {file_to_send}")
@@ -299,7 +294,6 @@ def forward_event(self, list, payload):
Returns:
list: list of all the reachable subscribers for the event.
"""
# resp = 400

for id in list:
path = os.path.join(self.redfish_root, 'EventService', 'Subscriptions', id)
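A compact sketch of the forwarding loop this docstring describes, assuming each Subscription object carries a 'Destination' property (per the Redfish EventService model) and that storage.read() mirrors the backend read used elsewhere; all names are illustrative.

import os
import requests  # assumption: an HTTP client such as requests

def forward_to_subscribers(storage, redfish_root, subscription_ids, payload):
    reachable = []
    for sub_id in subscription_ids:
        path = os.path.join(redfish_root, 'EventService', 'Subscriptions', sub_id)
        subscription = storage.read(path)
        try:
            response = requests.post(subscription["Destination"], json=payload, timeout=5)
            if response.ok:
                reachable.append(sub_id)
        except requests.RequestException:
            continue  # unreachable subscribers are skipped, not fatal
    return reachable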
@@ -403,39 +397,24 @@ def handleNestedObject(self, obj):
# this needs to be done on ALL agents, not just the one we just uploaded
RedfishEventHandler.updateAllAgentsRedirectedLinks(self)

return visited #why not the 'fetched' list?
return visited
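The comment above stresses that the redirected-link fix-up must run on every agent. A hypothetical sketch of that sweep, assuming the standard Redfish AggregationSources collection path and the storage_backend.read() seen elsewhere in this file:

def update_all_agents(core):
    sources = core.storage_backend.read(
        "/redfish/v1/AggregationService/AggregationSources")
    for member in sources.get("Members", []):
        agent = core.storage_backend.read(member["@odata.id"])
        # each agent's redirected links would be re-derived here
        print(f"re-checking redirected links for {agent['@odata.id']}")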

def create_uploaded_object(self, path: str, payload: dict):
# JSON validation should happen here, before adding the ID and calling the methods below

# the uploaded object must already carry a Redfish ID; we no longer generate one here
if '@odata.id' not in payload and 'Id' not in payload:
pass
#id = str(uuid.uuid4())
#to_add = {
#'Id': id,
#'@odata.id': os.path.join(path, id)
#}
#payload.update(to_add)
raise PropertyNotFound("create_uploaded_object: no Redfish ID (@odata.id) found")

#object_type = self._get_type(payload)
# we assume agents can upload collections, just not the root level collections
# we will check for uploaded collections later
#if "Collection" in object_type:
#raise CollectionNotSupported()

payload_to_write = payload

try:
# 1. check the path target of the operation exists
# self.storage_backend.read(path)
# 2. we don't check the manager; we assume uploading agent is the manager unless it says otherwise
#agent_response = self.objects_manager.forward_to_manager(SunfishRequestType.CREATE, path, payload=payload)
#if agent_response:
#payload_to_write = agent_response
# 3. should be no custom handler, this is not a POST, we upload the objects directly into the Redfish database
#self.objects_handler.dispatch(object_type, path, SunfishRequestType.CREATE, payload=payload)
# this would be another location to verify that the new object to be written
# meets Sunfish and Redfish requirements
pass
except ResourceNotFound:
logger.error("The collection where the resource is to be created does not exist.")
@@ -445,7 +424,7 @@ def create_uploaded_object(self, path: str, payload: dict):
# The object does not have a handler.
logger.debug(f"The object {object_type} does not have a custom handler")
pass
# 4. persist change in Sunfish tree
# persist change in Sunfish tree
return self.storage_backend.write(payload_to_write)

def get_aggregation_source(self, aggregation_source):
@@ -499,12 +478,21 @@ def fetchResource(self, obj_id, aggregation_source):

if response.status_code == 200: # Agent must have returned this object
redfish_obj = response.json()

# now rename if necessary and copy object into Sunfish inventory
redfish_obj = RedfishEventHandler.createInspectedObject(self,redfish_obj, aggregation_source)
if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]:
aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id'])
return redfish_obj
# however, it must be a minimally valid object
# This would be a great spot to insert a call to a Redfish schema validation function
# that could return a grading of this new redfish_obj: [PASS, FAIL, CAUTIONS]
# However, we are debugging not just code, but also new Redfish schema,
# so for now we just test for two required Redfish Properties to help weed out obviously incorrect responses
if '@odata.id' in redfish_obj and '@odata.type' in redfish_obj:

# now rename if necessary and copy object into Sunfish inventory
redfish_obj = RedfishEventHandler.createInspectedObject(self, redfish_obj, aggregation_source)
if redfish_obj['@odata.id'] not in aggregation_source["Links"]["ResourcesAccessed"]:
aggregation_source["Links"]["ResourcesAccessed"].append(redfish_obj['@odata.id'])
return redfish_obj
else:
# we treat this as an unsuccessful retrieval
return
else: # Agent did not successfully return the obj_id sought
# we still need to check the obj_id for an aliased parent segment
# so we detect renamed navigation links
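The validity check above muses about a schema-grading hook returning [PASS, FAIL, CAUTIONS]. A hypothetical stub of that interface (the grading rules shown are placeholders, not real Redfish schema validation):

from enum import Enum

class Grade(Enum):
    PASS = "PASS"
    FAIL = "FAIL"
    CAUTIONS = "CAUTIONS"

def grade_object(redfish_obj: dict) -> Grade:
    # FAIL on missing identity, CAUTIONS on missing commonly expected
    # properties, PASS otherwise; a real validator would load schema files
    if "@odata.id" not in redfish_obj or "@odata.type" not in redfish_obj:
        return Grade.FAIL
    if "Id" not in redfish_obj or "Name" not in redfish_obj:
        return Grade.CAUTIONS
    return Grade.PASS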
@@ -517,6 +505,7 @@ def createInspectedObject(self,redfish_obj, aggregation_source):
if '@odata.id' in redfish_obj:
obj_path = os.path.relpath(redfish_obj['@odata.id'], self.conf['redfish_root'])
else:
# we shouldn't allow an improper object to be passed in, so we raise an exception
raise PropertyNotFound(f"missing @odata.id in \n {json.dumps(redfish_obj, indent=2)}")

file_path = os.path.join(self.conf['redfish_root'], obj_path)
@@ -601,7 +590,6 @@ def createInspectedObject(self,redfish_obj, aggregation_source):
if redfish_obj["Oem"]["Sunfish_RM"]["BoundaryComponent"] == "BoundaryPort":
RedfishEventHandler.track_boundary_port(self, redfish_obj, aggregation_source)
# is this new object a new fabric object with same fabric UUID as an existing fabric?
# RedfishEventHandler.checkForAliasedFabrics(self, redfish_obj, aggregation_source)
RedfishEventHandler.create_uploaded_object(self, file_path, redfish_obj)

return redfish_obj
@@ -856,7 +844,6 @@ def redirectUpstreamPortLinks(self,owning_agent_id, agent_bp_obj,uri_aliasDB):
# extract the Endpoint URI associated with this parent object
host_obj = self.storage_backend.read(host_link)
redirected_endpoint = host_obj["Links"]["Endpoints"][0]["@odata.id"]
#redirected_endpoint = "None" #for now, to test

if "Links" not in agent_bp_obj:
agent_bp_obj["Links"] = {}
55 changes: 28 additions & 27 deletions sunfish_plugins/storage/file_system_backend/backend_FS.py
@@ -66,22 +66,34 @@ def write(self, payload: dict):
length = len(self.redfish_root)
id = payload['@odata.id'][length:] # id without the redfish root (e.g. /redfish/v1/)
parent_is_collection = True # default assumption
last_parent_to_exist=""

print(f"BackendFS.write called on {id}")
logging.info(f"BackendFS.write called on {id}")
id = id.split('/')
for index in range(2, len(id[1:])):
to_check = os.path.join('/'.join(id[:index]), 'index.json')
to_check = os.path.join(os.getcwd(), self.root, to_check)
print(f"BackendFS.write(): path to check: {to_check}")
logging.info(f"BackendFS.write(): path to check: {to_check}")
if os.path.exists(to_check) is True:
# capture this parent path as existing
last_parent_to_exist = to_check
if os.path.exists(to_check) is False:
print("path does not exist\n")
raise ActionNotAllowed()
logging.info("path does not exist\n")
# nice to know, but NOT an error!
# Log the situation and continue



# This particular code block looks unfinished and its purpose/functionality is unknown.
# It looks as if part of this block was intended to fill in missing path elements, making it redundant
# with the code just below it. The block also sets a flag that is never used; more analysis is required.
#
'''
with open(to_check, 'r') as data_json:
data = json.load(data_json)
data_json.close()
if 'Collection' in data["@odata.type"]:
print("path is to a Collection\n")
logging.info("path is to a Collection\n")
members = data["Members"]
for x in members:
if x["@odata.id"] == os.path.join(self.redfish_root, '/'.join(id[:index + 1])):
@@ -96,13 +108,13 @@ def write(self, payload: dict):
present = True
else:
el["@odata.id"] = os.path.join(self.redfish_root, '/'.join(id[:index + 1]))
print(f"BackendFS.write of {el['@odata.id']}")
logging.info(f"BackendFS.write of {el['@odata.id']}")
with open(to_check, 'w') as data_json:
json.dump(data, data_json, indent=4, sort_keys=True)
data_json.close()

'''
# we get here only if all grandparent objects exist
# we get here only if at least one grandparent object exists
last_element = len(id) - 1
collection_type = id[last_element - 1]
resource_id = id[last_element]
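The loop above records the deepest ancestor whose index.json already exists. Restated as a standalone helper for clarity (a paraphrase of the logic, not the shipped code):

import os

def last_existing_parent(resources_root: str, segments: list) -> str:
    # walk the id path one segment at a time, remembering the deepest
    # ancestor directory that already holds an index.json
    last_parent = ""
    for index in range(2, len(segments)):
        candidate = os.path.join(resources_root, '/'.join(segments[:index]), 'index.json')
        if os.path.exists(candidate):
            last_parent = candidate
    return last_parent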
@@ -112,33 +124,23 @@ def write(self, payload: dict):
for i in range(0, last_element - 1):
full_collection = full_collection + id[i] + '/'

collection_type = os.path.join(full_collection, collection_type)
full_collection = os.path.join(full_collection, collection_type)

collection_path = os.path.join(os.getcwd(), self.root,
collection_type) # collection_path .../Resources/[folder], collection_type = [folder]
parent_path = os.path.dirname(collection_path) # parent path .../Resources
full_collection)
parent_path = os.path.dirname(collection_path)

#pdb.set_trace()
# check if the directory of the Collection already exists
if not os.path.exists(collection_path):
# if parent directory doesn't exist, we assume it is a collection and create the collection
print(f"backendFS.write: making collection path directory")
logging.info(f"backendFS.write: making collection path directory")
os.makedirs(collection_path)

# the following line assumes the path element name dictates the collection type
# it is more proper to examine the @odata.type property of the object being created!
config = utils.generate_collection(collection_type)

# if the item to be written is managed by an agent, we want the collection containing it to also be marked
# accordingly. We do this only for collections to be created because we assume that if the collection is
# there already:
# a. The collection is a first level one that is managed by Sunfish
# b. The collection was previously created during an agent discovery process and therefore already marked
# if "Oem" in payload and "Sunfish_RM" in payload["Oem"] and len(id) > 2 :
# if "Oem" not in config:
# config["Oem"] = {}
# config["Oem"]["Sunfish_RM"] = payload["Oem"]["Sunfish_RM"]

## write file Resources/[folder]/index.json
with open(os.path.join(collection_path, "index.json"), "w") as fd:
fd.write(json.dumps(config, indent=4, sort_keys=True))
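Per the comment above, the collection type is better derived from the payload's @odata.type than from the path segment name. A hedged helper illustrating that derivation:

def collection_type_from_odata(payload: dict) -> str:
    # "#PortCollection.PortCollection" -> "PortCollection"
    return payload.get("@odata.type", "").lstrip("#").split(".")[0]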
Expand All @@ -158,13 +160,13 @@ def write(self, payload: dict):
parent_data = json.load(data_json)
data_json.close()
if 'Collection' in parent_data["@odata.type"]:
print("parent path is to a Collection\n")
logging.info("parent path is to a Collection\n")
if utils.check_unique_id(index_path, payload['@odata.id']) is False:
raise AlreadyExists(payload['@odata.id'])
pass
else:
print("path is to an object\n")
parent_is_collection = False #
logging.info("path is to an object\n")
parent_is_collection = False
pass


@@ -234,7 +236,7 @@ def _update_object(self, payload: dict, replace: bool):
Returns:
str: id of the updated resource
"""
## code that re-write into file
# code that rewrites the file
logging.info('BackendFS patch update called')

# get ID and collection from payload
@@ -274,7 +276,6 @@ def _update_object(self, payload: dict, replace: bool):
raise ResourceNotFound(resource_id)

result: str = self.read(payload["@odata.id"])
# result:str = payload['@odata.id']

return result
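For clarity, the difference between the replace=True (PUT-style) and merge (PATCH-style) paths can be sketched as follows; this is a shallow-merge illustration, not the backend's exact logic:

def apply_update(existing: dict, patch: dict, replace: bool) -> dict:
    if replace:
        return dict(patch)   # PUT: the payload replaces the object wholesale
    merged = dict(existing)
    merged.update(patch)     # PATCH: new properties overlay existing ones
    return merged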

@@ -291,7 +292,7 @@ def remove(self, path:str):
Returns:
str: confirmation string
"""
## code that removes a file
# code that removes a file
logging.info('BackendFS: remove called')

length = len(self.redfish_root)
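A hedged sketch of what the removal amounts to on a file-system backend; the directory layout is assumed from the write path above, and a complete implementation would also prune the parent collection's Members list:

import os
import shutil

def remove_resource(resources_root: str, relative_id: str) -> None:
    target = os.path.join(resources_root, relative_id)
    if os.path.isdir(target):
        shutil.rmtree(target)  # delete the resource directory tree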