From 61e2c1cca0e374ff0752e3e8042d9b3e25535da2 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 23 Aug 2023 18:32:08 +0200 Subject: [PATCH 01/23] cleanup_org: failure on missing env var, use same env vars as cli: without DD_ prefix --- scripts/cleanup_org.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/cleanup_org.py b/scripts/cleanup_org.py index 776481d2..54922562 100644 --- a/scripts/cleanup_org.py +++ b/scripts/cleanup_org.py @@ -18,7 +18,7 @@ class Cleanup: def __init__(self): self.headers = get_headers() - self.base_url = os.getenv("DD_DESTINATION_API_URL") + self.base_url = os.environ["DESTINATION_API_URL"] # Validate test org self.validate_org() @@ -244,8 +244,8 @@ def delete_resource(self, _id, path, **kwargs): def get_headers(): return { - "DD-API-KEY": os.getenv("DD_DESTINATION_API_KEY"), - "DD-APPLICATION-KEY": os.getenv("DD_DESTINATION_APP_KEY"), + "DD-API-KEY": os.environ["DESTINATION_API_KEY"], + "DD-APPLICATION-KEY": os.environ["DESTINATION_APP_KEY"], "Content-Type": "application/json", } From a1331551e0bd1a1901d5cc32fdbd883b257bf186 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 23 Aug 2023 18:32:50 +0200 Subject: [PATCH 02/23] cleanup_org: don't try to delete ootb log pipeline: not authorized --- scripts/cleanup_org.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/cleanup_org.py b/scripts/cleanup_org.py index 54922562..ad935eaa 100644 --- a/scripts/cleanup_org.py +++ b/scripts/cleanup_org.py @@ -87,6 +87,9 @@ def cleanup_logs_custom_pipelines( path = "/api/v1/logs/config/pipelines" res = self.get_resources(path) for resource in res: + if resource["is_read_only"]: + # ootb/integration pipeline, can't delete + continue self.delete_resource(resource["id"], path) def cleanup_monitors( From 4cbdf2183b535eec46e412381de69c54ab15d534 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 23 Aug 2023 18:47:59 +0200 Subject: [PATCH 03/23] don't touch users or host tags --- scripts/cleanup_org.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/cleanup_org.py b/scripts/cleanup_org.py index ad935eaa..8b4f0d8d 100644 --- a/scripts/cleanup_org.py +++ b/scripts/cleanup_org.py @@ -36,11 +36,11 @@ def __init__(self): self.cleanup_logs_custom_pipelines() self.cleanup_monitors() self.cleanup_notebooks() - self.cleanup_users() + # self.cleanup_users() self.cleanup_roles() self.cleanup_logs_metrics() self.cleanup_metric_tag_configurations() - self.cleanup_host_tags() + # self.cleanup_host_tags() self.cleanup_logs_restriction_queries() # self.cleanup_integrations_aws() From dd37f78d2d9d71fc843be961e11dbdd188a8b5ce Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 23 Aug 2023 18:48:10 +0200 Subject: [PATCH 04/23] debug rate limiting by printing headers --- scripts/cleanup_org.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/cleanup_org.py b/scripts/cleanup_org.py index 8b4f0d8d..7d705e9c 100644 --- a/scripts/cleanup_org.py +++ b/scripts/cleanup_org.py @@ -231,7 +231,7 @@ def get_resources(self, path, *args, **kwargs): resp = requests.get(url, headers=self.headers, timeout=60, *args, **kwargs) resp.raise_for_status() except requests.exceptions.HTTPError as e: - print("Error getting url %s: %s", url, e) + print("Error getting url %s: %s, %s", url, e, e.response.headers) return return resp.json() @@ -242,7 +242,7 @@ def delete_resource(self, _id, path, **kwargs): resp.raise_for_status() print("deleted resource ", url, _id) except requests.exceptions.HTTPError 
as e: - print("Error deleting resource: %s", e) + print("Error deleting resource: %s, %s", e, e.response.headers) def get_headers(): From a7e5867cb39f5f02f119c8aaae19715f53ab9bb3 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Tue, 12 Sep 2023 19:42:08 +0200 Subject: [PATCH 05/23] gitignore emacs --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 3e5ebb51..f94d1b12 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ datadog_sync/version.py **/*.pyc **/*.pyo +**/*~ .vscode/ .idea/ .coverage From 455eb93276cbfd350bbf0f7c9f7416c6da6ad408 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 11:54:55 +0200 Subject: [PATCH 06/23] deepomatic fork readme --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 3752e00e..a3c03737 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,12 @@ +# Deepomatic fork +Dump of additional hackish import/sync/cleanup_org code for extra resources not supported by upstream, used by Deepomatic when migrating regions. Maybe it will help somebody. + +Inspiration: https://careers.wolt.com/en/blog/tech/datadog-migration-wolt + +Warning: it's a hack, with shortcuts: +- it is *not* endorsed by Datadog (or supported by Deepomatic) + + # datadog-sync-cli Datadog cli tool to sync resources across organizations. From 5e01b18ad79dc6dd6051e60790ce6bc0042b7e81 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 11:45:18 +0200 Subject: [PATCH 07/23] add cookie dogweb auth mode needed for hackish web frontend api usage (e.g. logs_facets) when the cookie_dogweb config value is defined, the client *switches* to cookie auth *only*: the api doesn't support both api/app key & cookie auth. => setting the cookie_dogweb config *will* break all standard resource types. the destination also needs csrf_token, passed as the _authentication_token key in the request payload for write actions.
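In short, the auth-mode switch introduced in the `custom_client.py` hunk below boils down to the following header selection; this is a condensed sketch of the patched `build_default_headers()` (the full version in the diff also sets the User-Agent):

```python
def build_default_headers(auth_obj: dict, is_cookieauth_mode: bool) -> dict:
    # The two auth schemes are mutually exclusive: the API rejects requests
    # mixing the dogweb cookie with API/APP keys, so the client switches
    # entirely to cookie auth when cookie_dogweb is configured.
    headers = {"Content-Type": "application/json"}
    if is_cookieauth_mode:
        headers["Cookie"] = "dogweb=" + auth_obj["cookieDogWeb"]
    else:
        headers["DD-API-KEY"] = auth_obj["apiKeyAuth"]
        headers["DD-APPLICATION-KEY"] = auth_obj["appKeyAuth"]
    return headers
```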
--- README.md | 8 ++++++++ datadog_sync/commands/shared/options.py | 21 +++++++++++++++++++++ datadog_sync/constants.py | 4 ++++ datadog_sync/utils/configuration.py | 10 ++++++++-- datadog_sync/utils/custom_client.py | 20 +++++++++++++++----- 5 files changed, 56 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index a3c03737..6319b6bc 100644 --- a/README.md +++ b/README.md @@ -3,8 +3,16 @@ Dump of additional hackish import/sync/cleanup_org for extra resources not suppo Inspiration: https://careers.wolt.com/en/blog/tech/datadog-migration-wolt +For some resources: it uses non-official api (from web frontend), using `dogweb` cookie and `x-csrf-token` header +``` +source_cookie_dogweb="xxx" +destination_cookie_dogweb="xxx" +destination_csrf_token="xxx" +``` Warning: it's a hack, with shortcuts: - it is *not* endorsed by Datadog (or supported by Deepomatic) +- authentication is either/or: cookie_dogweb config are required for those resources, and datadog-cli switches to cookie dogweb mode if config set, it *will not* work for other resources +- web frontend api is not documented, it could break at any time # datadog-sync-cli diff --git a/datadog_sync/commands/shared/options.py b/datadog_sync/commands/shared/options.py index 8a30ff95..7aa2a7a9 100644 --- a/datadog_sync/commands/shared/options.py +++ b/datadog_sync/commands/shared/options.py @@ -51,6 +51,13 @@ def handle_parse_result(self, ctx: Context, opts: Dict[Any, Any], args: List[Any help="Datadog source organization API url.", cls=CustomOptionClass, ), + option( + "--source-cookie-dogweb", + envvar=constants.DD_SOURCE_COOKIE_DOGWEB, + required=False, + help="Datadog source organization 'dogweb' cookie.", + cls=CustomOptionClass, + ), ] _destination_auth_options = [ @@ -77,6 +84,20 @@ def handle_parse_result(self, ctx: Context, opts: Dict[Any, Any], args: List[Any help="Datadog destination organization API url.", cls=CustomOptionClass, ), + option( + "--destination-cookie-dogweb", + envvar=constants.DD_DESTINATION_COOKIE_DOGWEB, + required=False, + help="Datadog destination organization 'dogweb' cookie.", + cls=CustomOptionClass, + ), + option( + "--destination-csrf-token", + envvar=constants.DD_DESTINATION_CSRF_TOKEN, + required=False, + help="Datadog destination organization 'x-csrf-token' header.", + cls=CustomOptionClass, + ), ] diff --git a/datadog_sync/constants.py b/datadog_sync/constants.py index 17117f03..70b5a6b9 100644 --- a/datadog_sync/constants.py +++ b/datadog_sync/constants.py @@ -7,9 +7,12 @@ DD_SOURCE_API_URL = "DD_SOURCE_API_URL" DD_SOURCE_API_KEY = "DD_SOURCE_API_KEY" DD_SOURCE_APP_KEY = "DD_SOURCE_APP_KEY" +DD_SOURCE_COOKIE_DOGWEB = "DD_SOURCE_COOKIE_DOGWEB" DD_DESTINATION_API_URL = "DD_DESTINATION_API_URL" DD_DESTINATION_API_KEY = "DD_DESTINATION_API_KEY" DD_DESTINATION_APP_KEY = "DD_DESTINATION_APP_KEY" +DD_DESTINATION_COOKIE_DOGWEB = "DD_DESTINATION_COOKIE_DOGWEB" +DD_DESTINATION_CSRF_TOKEN = "DD_DESTINATION_CSRF_TOKEN" DD_HTTP_CLIENT_RETRY_TIMEOUT = "DD_HTTP_CLIENT_RETRY_TIMEOUT" DD_HTTP_CLIENT_TIMEOUT = "DD_HTTP_CLIENT_TIMEOUT" DD_RESOURCES = "DD_RESOURCES" @@ -30,6 +33,7 @@ SOURCE_ORIGIN = "source" DESTINATION_ORIGIN = "destination" VALIDATE_ENDPOINT = "/api/v1/validate" +VALIDATE_ENDPOINT_COOKIEAUTH = "/api/v1/settings/favorite/list" # Commands CMD_IMPORT = "import" diff --git a/datadog_sync/utils/configuration.py b/datadog_sync/utils/configuration.py index 0c83d551..2efa1507 100644 --- a/datadog_sync/utils/configuration.py +++ b/datadog_sync/utils/configuration.py @@ -14,7 +14,7 @@ from 
datadog_sync.utils.base_resource import BaseResource from datadog_sync.utils.log import Log from datadog_sync.utils.filter import Filter, process_filters -from datadog_sync.constants import CMD_DIFFS, CMD_IMPORT, CMD_SYNC, FALSE, FORCE, LOGGER_NAME, TRUE, VALIDATE_ENDPOINT +from datadog_sync.constants import CMD_DIFFS, CMD_IMPORT, CMD_SYNC, FALSE, FORCE, LOGGER_NAME, TRUE, VALIDATE_ENDPOINT, VALIDATE_ENDPOINT_COOKIEAUTH from datadog_sync.utils.resource_utils import CustomClientHTTPError @@ -50,12 +50,15 @@ def build_config(cmd: str, **kwargs: Optional[Any]) -> Configuration: source_auth = { "apiKeyAuth": kwargs.get("source_api_key", ""), "appKeyAuth": kwargs.get("source_app_key", ""), + "cookieDogWeb": kwargs.get("source_cookie_dogweb", ""), } source_client = CustomClient(source_api_url, source_auth, retry_timeout, timeout) destination_auth = { "apiKeyAuth": kwargs.get("destination_api_key", ""), "appKeyAuth": kwargs.get("destination_app_key", ""), + "cookieDogWeb": kwargs.get("destination_cookie_dogweb", ""), + "x-csrf-token": kwargs.get("destination_csrf_token", ""), } destination_client = CustomClient(destination_api_url, destination_auth, retry_timeout, timeout) @@ -129,7 +132,10 @@ def init_resources(cfg: Configuration) -> Dict[str, BaseResource]: def _validate_client(client: CustomClient) -> None: logger = logging.getLogger(LOGGER_NAME) try: - client.get(VALIDATE_ENDPOINT).json() + if client.cookieauth: + client.get(VALIDATE_ENDPOINT_COOKIEAUTH).json() + else: + client.get(VALIDATE_ENDPOINT).json() except CustomClientHTTPError as e: logger.error(f"invalid api key: {e}") exit(1) diff --git a/datadog_sync/utils/custom_client.py b/datadog_sync/utils/custom_client.py index d8b5580a..0df3899e 100644 --- a/datadog_sync/utils/custom_client.py +++ b/datadog_sync/utils/custom_client.py @@ -62,7 +62,9 @@ def __init__(self, host: Optional[str], auth: Dict[str, str], retry_timeout: int self.timeout = timeout self.session = requests.Session() self.retry_timeout = retry_timeout - self.session.headers.update(build_default_headers(auth)) + self.cookieauth = bool(auth.get("cookieDogWeb")) + self.csrf_token = auth.get("x-csrf-token") + self.session.headers.update(build_default_headers(auth, self.cookieauth)) self.default_pagination = PaginationConfig() @request_with_retry @@ -123,13 +125,21 @@ def wrapper(*args, **kwargs): return wrapper -def build_default_headers(auth_obj: Dict[str, str]) -> Dict[str, str]: +def build_default_headers(auth_obj: Dict[str, str], is_cookieauth_mode: bool) -> Dict[str, str]: headers = { - "DD-API-KEY": auth_obj["apiKeyAuth"], - "DD-APPLICATION-KEY": auth_obj["appKeyAuth"], "Content-Type": "application/json", "User-Agent": _get_user_agent(), } + if is_cookieauth_mode: + headers |= { + "Cookie": "dogweb=" + auth_obj["cookieDogWeb"], + } + + else: + headers |= { + "DD-API-KEY": auth_obj["apiKeyAuth"], + "DD-APPLICATION-KEY": auth_obj["appKeyAuth"], + } return headers @@ -139,7 +149,7 @@ def _get_user_agent() -> str: except (ModuleNotFoundError, ImportError): version = None - return "datadog-sync-cli/{version} (python {pyver}; os {os}; arch {arch})".format( + return "datadog-sync-cli/{version}-deepomatic-patch (python {pyver}; os {os}; arch {arch})".format( version=version, pyver=platform.python_version(), os=platform.system().lower(), From 0af262b11d01d7abeb954f562ac9876f03b01c24 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Thu, 7 Sep 2023 19:06:42 +0200 Subject: [PATCH 08/23] additional resource: logs_facets - hardcoded source & target scopeid for now (seem to be 
internal index id, but which ones?) - use x-csrf-token as _authentication_token key in request payload for write actions --- README.md | 6 ++ datadog_sync/model/logs_facets.py | 103 ++++++++++++++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 110 insertions(+) create mode 100644 datadog_sync/model/logs_facets.py diff --git a/README.md b/README.md index 6319b6bc..b6506f0c 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,12 @@ Warning: it's a hack, with shortcuts: - web frontend api is not documented, it could break at any time +## extra resources +### logs_facets +how to use: +- edit hardcoded `sourceid` in `datadog_sync/model/logs_facets.py` for your organizations, by getting the values in URLs with manual update facet on the web ui. +- setup dogweb cookie mode, cf above + # datadog-sync-cli Datadog cli tool to sync resources across organizations. diff --git a/datadog_sync/model/logs_facets.py b/datadog_sync/model/logs_facets.py new file mode 100644 index 00000000..280437c4 --- /dev/null +++ b/datadog_sync/model/logs_facets.py @@ -0,0 +1,103 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc. + +from __future__ import annotations +from copy import deepcopy +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class LogsFacets(BaseResource): + resource_type = "logs_facets" + resource_config = ResourceConfig( + base_path="/api/v1/logs", + excluded_attributes=["bounded", "bundledAndUsed"], + ) + # Additional LogsFacets specific attributes + destination_logs_facets: Dict[str, Dict] = dict() + + # TODO stop hardcoding those; see what the web frontend does + source_scopeid = "1762986" + destination_scopeid = "1000288307" + + + def get_resources(self, client: CustomClient) -> List[Dict]: + resp = client.get(self.resource_config.base_path + "/facet_lists?type=logs").json() + + return resp["facets"]["logs"] + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if _id: + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path + f"/scopes/{self.source_scopeid}/facets/{_id}").json() + + resource = cast(dict, resource) + if not resource["editable"]: + return + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, _id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + self.destination_logs_facets = self.get_destination_logs_facets() + + def create_resource(self, _id: str, resource: Dict) -> None: + if _id in self.destination_logs_facets: + self.resource_config.destination_resources[_id] = self.destination_logs_facets[_id] + self.update_resource(_id, resource) + return + + destination_client = self.config.destination_client + payload = deepcopy(resource) + payload["_authentication_token"] = destination_client.csrf_token + resp = destination_client.post( + self.resource_config.base_path + + f"/scopes/{self.destination_scopeid}/facets?type=logs", + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + 
payload = deepcopy(resource) + payload["_authentication_token"] = destination_client.csrf_token + resp = destination_client.post( + self.resource_config.base_path + + f"/scopes/{self.destination_scopeid}/facets/" + + f"{self.resource_config.destination_resources[_id]['id']}?type=logs", + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp + + def delete_resource(self, _id: str) -> None: + destination_client = self.config.destination_client + payload = {} + payload["_authentication_token"] = destination_client.csrf_token + destination_client.delete( + self.resource_config.base_path + + f"/scopes/{self.destination_scopeid}/facets/" + + f"{self.resource_config.destination_resources[_id]['id']}?type=logs", + payload, + ).json() + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + pass + + def get_destination_logs_facets(self) -> Dict[str, Dict]: + destination_logs_facets = {} + destination_client = self.config.destination_client + + resp = self.get_resources(destination_client) + for log_facet in resp: + destination_logs_facets[log_facet["id"]] = log_facet + + return destination_logs_facets diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index 6184f7a4..479caaf5 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -23,3 +23,4 @@ from datadog_sync.model.logs_indexes import LogsIndexes from datadog_sync.model.logs_restriction_queries import LogsRestrictionQueries from datadog_sync.model.spans_metrics import SpansMetrics +from datadog_sync.model.logs_facets import LogsFacets From 8226ca5b1751ee477e5230f3601fe196cb0a9ea6 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 14:54:54 +0200 Subject: [PATCH 09/23] additional resource: logs_views - use dogweb cookie auth - use x-csrf-token as _authentication_token key in request payload for write actions --- README.md | 4 ++ datadog_sync/model/logs_views.py | 87 ++++++++++++++++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 92 insertions(+) create mode 100644 datadog_sync/model/logs_views.py diff --git a/README.md b/README.md index b6506f0c..1fa7ca27 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,10 @@ how to use: - edit hardcoded `sourceid` in `datadog_sync/model/logs_facets.py` for your organizations, by getting the values in URLs with manual update facet on the web ui. - setup dogweb cookie mode, cf above +### logs_views +how to use: +- setup dogweb cookie mode, cf above + # datadog-sync-cli Datadog cli tool to sync resources across organizations. diff --git a/datadog_sync/model/logs_views.py b/datadog_sync/model/logs_views.py new file mode 100644 index 00000000..692517bb --- /dev/null +++ b/datadog_sync/model/logs_views.py @@ -0,0 +1,87 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc. 
+ +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class LogsViews(BaseResource): + resource_type = "logs_views" + resource_config = ResourceConfig( + resource_connections={"logs_indexes": ["index"]}, + base_path="/api/v1/logs/views", + excluded_attributes=[ + "modified_at", + "author", + "id", + "integration_id", + "integration_short_name", + "is_favorite" + ] + ) + # Additional LogsViews specific attributes + + def get_resources(self, client: CustomClient) -> List[Dict]: + resp = client.get(self.resource_config.base_path).json() + + return resp["logs_views"] + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if _id: + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path + f"/{_id}").json()["logs_view"] + + resource = cast(dict, resource) + # skip integrations saved views + if resource["integration_id"]: + return + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, _id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + pass + + def create_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = resource + payload["_authentication_token"] = destination_client.csrf_token + resp = destination_client.post( + self.resource_config.base_path, + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = resource + payload["_authentication_token"] = destination_client.csrf_token + resp = destination_client.put( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}", + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp + + def delete_resource(self, _id: str) -> None: + destination_client = self.config.destination_client + payload = {} + payload["_authentication_token"] = destination_client.csrf_token + destination_client.delete( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}?type=logs", + payload, + ).json() + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + pass diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index 479caaf5..c493b532 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -24,3 +24,4 @@ from datadog_sync.model.logs_restriction_queries import LogsRestrictionQueries from datadog_sync.model.spans_metrics import SpansMetrics from datadog_sync.model.logs_facets import LogsFacets +from datadog_sync.model.logs_views import LogsViews From 62293f457226f76d5c31e4e7a5c62df9dbcd86f4 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Tue, 12 Sep 2023 20:43:43 +0200 Subject: [PATCH 10/23] additional resource: metric_metadatas - get all metric metadata with standard v2 api, but it returns only ids - hack import_resource standard pattern to populate the dict that contains just 'id'; it's called during import just after get_resources() - get & update metric metadata uses v1 api, which doesn't have 'id' in its dict, hack adding it everywhere for consistency with v2/standard pattern - 
create metric metadata is *not* supported by datadog api, just update it on an already existing metric: raise an error explaining that: first push data-points on the metric, then rerun the script - use standard pattern to get the destination resources (just ids here), and call update_resource() instead if it already exists - initial diff won't be the actual diff: it says it will create everything --- README.md | 4 ++ datadog_sync/model/metric_metadatas.py | 85 ++++++++++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 90 insertions(+) create mode 100644 datadog_sync/model/metric_metadatas.py diff --git a/README.md b/README.md index 1fa7ca27..a3d94868 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,10 @@ how to use: how to use: - setup dogweb cookie mode, cf above +### metric_metadatas +create metric metadata is *not* supported by datadog api, we can just update it on an already existing metric. +- first push data-points on the metric, then rerun the script when the new metrics are populated + # datadog-sync-cli Datadog cli tool to sync resources across organizations. diff --git a/datadog_sync/model/metric_metadatas.py b/datadog_sync/model/metric_metadatas.py new file mode 100644 index 00000000..98e81633 --- /dev/null +++ b/datadog_sync/model/metric_metadatas.py @@ -0,0 +1,85 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc. + +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class MetricMetadatas(BaseResource): + resource_type = "metric_metadatas" + resource_config = ResourceConfig( + base_path="/api/v1/metrics", + excluded_attributes=["integration"], + ) + # Additional MetricMetadatas specific attributes + destination_metric_metadatas: Dict[str, Dict] = dict() + + def get_resources(self, client: CustomClient) -> List[Dict]: + resp = client.get("/api/v2/metrics").json()["data"] + + # cleanup "type": "metrics", + for metric in resp: + del metric['type'] + + # return objects with only "id" field + return resp + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if resource.keys() == {'id'}: + # we get only the id from the metrics list, force getting the metric metadata individually + _id = resource['id'] + if _id: + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path + f"/{_id}").json() + resource['id'] = _id + + resource = cast(dict, resource) + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, _id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + self.destination_metric_metadatas = self.get_destination_metric_metadatas() + + def create_resource(self, _id: str, resource: Dict) -> None: + if _id in self.destination_metric_metadatas: + self.resource_config.destination_resources[_id] = self.destination_metric_metadatas[_id] + self.update_resource(_id, resource) + return + + raise Exception("creating metric_metadatas is not supported: push data-points to it and then rerun (it will then update it instead of trying to create)") + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = 
self.config.destination_client + payload = resource + resp = destination_client.put( + self.resource_config.base_path + f"/{self.resource_config.destination_resources[_id]['id']}", + payload, + ).json() + resp['id'] = self.resource_config.destination_resources[_id]['id'] + + self.resource_config.destination_resources[_id] = resp + + def delete_resource(self, _id: str) -> None: + raise Exception("deleting metric_metadatas is not supported") + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + pass + + def get_destination_metric_metadatas(self) -> Dict[str, Dict]: + destination_metric_metadatas = {} + destination_client = self.config.destination_client + + resp = self.get_resources(destination_client) + for metric_metadata in resp: + destination_metric_metadatas[metric_metadata["id"]] = metric_metadata + + return destination_metric_metadatas diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index c493b532..9d18d4be 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -19,6 +19,7 @@ from datadog_sync.model.notebooks import Notebooks from datadog_sync.model.logs_metrics import LogsMetrics from datadog_sync.model.host_tags import HostTags +from datadog_sync.model.metric_metadatas import MetricMetadatas from datadog_sync.model.metric_tag_configurations import MetricTagConfigurations from datadog_sync.model.logs_indexes import LogsIndexes from datadog_sync.model.logs_restriction_queries import LogsRestrictionQueries From 9469688e0ea6de7eb2286a756a684ff56520932d Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Tue, 26 Sep 2023 18:35:21 +0200 Subject: [PATCH 11/23] Fix crash on resource_connection when hitting None instead of object just when len(keys_list) == 1 --- datadog_sync/utils/resource_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/datadog_sync/utils/resource_utils.py b/datadog_sync/utils/resource_utils.py index 90c53c94..6339ed63 100644 --- a/datadog_sync/utils/resource_utils.py +++ b/datadog_sync/utils/resource_utils.py @@ -47,6 +47,8 @@ def find_attr(keys_list_str: str, resource_to_connect: str, r_obj: Any, connect_ if failed: failed_connections.extend(failed) return failed_connections + elif r_obj is None: + return None else: keys_list = keys_list_str.split(".", 1) From 276d2e05fdedc25b62155b1140bc3d81cb62904c Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 16:45:22 +0200 Subject: [PATCH 12/23] additional resource: incidents Only the base incident data is supported, related resources (integrations(slack), todos(remediations), attachments) may be done later with dedicated resources. The import is lossy: for example the creation date is on sync, timeline is lost, etc. 
- the datadog api documentation says only a subset of accepted fields for creation; in practice it handles only a subset, and ignores the others => auto update just after create to sync more data - attributes.notification_handles ignored: too hard to reproduce properly, spams people during sync (or dates are all wrong); we don't really care about that piece of information => skip it - avoid forever diff on old incidents without `attributes.visibility`: hardcode the `organization` value, it's what will be created on the destination organization anyway - the incidents list api initially skipped old incidents without the `visibility` field, an exchange with datadog support resulted in a production fix - after full import: it seems users not yet active on the destination organization are in fact *not* notified (active incident commanders are notified, which is mandatory according to the api documentation) --- README.md | 9 +++ datadog_sync/model/incidents.py | 125 ++++++++++++++++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 135 insertions(+) create mode 100644 datadog_sync/model/incidents.py diff --git a/README.md b/README.md index a3d94868..8ff64d3b 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,15 @@ how to use: create metric metadata is *not* supported by datadog api, we can just update it on an already existing metric. - first push data-points on the metric, then rerun the script when the new metrics are populated +### incidents +The supported scenario is importing all incidents (in order) so the `public_id` values (1, 2, etc.) are identical in the source & destination organizations: never create new incidents in the destination organization before finishing the migration with datadog-sync-cli. + +Only the base incident data is supported; related resources (integrations(slack), todos(remediations), attachments) may be done later with dedicated resources. + +The import is lossy: for example the creation date becomes the sync date, the timeline is lost, etc. + +'notifications' are explicitly not-sync'ed to avoid spamming people during import (although later tests seem to conclude 'inactive' users (invitation pending: sync'ed users who never connected to the destination region) are *not* notified) + # datadog-sync-cli Datadog cli tool to sync resources across organizations. diff --git a/datadog_sync/model/incidents.py b/datadog_sync/model/incidents.py new file mode 100644 index 00000000..f9ecd04c --- /dev/null +++ b/datadog_sync/model/incidents.py @@ -0,0 +1,125 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc.
+ +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig +from datadog_sync.utils.custom_client import PaginationConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class Incidents(BaseResource): + resource_type = "incidents" + resource_config = ResourceConfig( + resource_connections={ + "users": [ + "relationships.commander_user.data.id", + ] + }, + base_path="/api/v2/incidents", + excluded_attributes=[ + "id", + "attributes.public_id", + "attributes.commander", # somehow returned by create or update, not by get + "attributes.last_modified_by", # somehow returned by create or update, not by get + "attributes.last_modified_by_uuid", + "attributes.created", + "attributes.modified", + "attributes.created_by", # somehow returned by create or update, not by get + "attributes.created_by_uuid", + "attributes.notification_handles", # too hard to support properly, also, it gives wrong dates, and possibly spams people, we don't want that; ok to loose that info + "attributes.time_to_resolve", + "attributes.customer_impact_duration", # computed field + "relationships.created_by_user", + "relationships.last_modified_by_user", + "relationships.user_defined_fields", + "relationships.integrations", + "relationships.attachments", + "relationships.responders", + "relationships.impacts", + ], + non_nullable_attr=[ + "attributes.creation_idempotency_key", + "attributes.customer_impact_scope", + ], + + ) + # Additional Incidents specific attributes + pagination_config = PaginationConfig( + page_size=100, + page_number_param="page[offset]", + page_size_param="page[size]", + # this endpoint uses offset (number of items) instead of page number, workaround the paginated client by reusing `page_number` to store offset instead (computed here because we don't have `resp`) + page_number_func=lambda idx, page_size, page_number: page_size * (idx + 1), + # just return 1, the pagination loop already handles breaking when a page is smaller than page size + remaining_func=lambda *args: 1, + ) + + def get_resources(self, client: CustomClient) -> List[Dict]: + # we return the incidents in public_id order, so creating them on a fresh organizations will gives us the same public_id in source & destination organizations + + resp = client.paginated_request(client.get)( + self.resource_config.base_path, pagination_config=self.pagination_config + ) + + return resp + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if _id: + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path + f"/{_id}").json()["data"] + + resource = cast(dict, resource) + + # it's the new default imposed by the api; forcing it here so we don't have a forever-diff + if "visibility" in resource["attributes"] and resource["attributes"]["visibility"] is None: + resource["attributes"]["visibility"] = "organization" + + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, _id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + pass + + def create_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + # the datadog api documentation says only a subset of accepted fields for creation; in practice it does handles only a subset, and ignores the others + resp = 
destination_client.post( + self.resource_config.base_path, + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + # create doesn't accept everything right away, e.g. attributes.resolved; follow the create by an update to sync more data + self.update_resource(_id, resource) + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + + resp = destination_client.patch( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}", + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + def delete_resource(self, _id: str) -> None: + destination_client = self.config.destination_client + destination_client.delete( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}" + ) + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + return super(Incidents, self).connect_id(key, r_obj, resource_to_connect) diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index 9d18d4be..8f85f0c3 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -26,3 +26,4 @@ from datadog_sync.model.spans_metrics import SpansMetrics from datadog_sync.model.logs_facets import LogsFacets from datadog_sync.model.logs_views import LogsViews +from datadog_sync.model.incidents import Incidents From 9518f0d2f8b60ccf7f1a7a4a4ae70dc9b83af057 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Tue, 26 Sep 2023 19:41:20 +0200 Subject: [PATCH 13/23] Add deepomatic-specific incident fields migration: Namespace=>kube_namespace if kube_namespace is not null, don't overwrite it: its data is better.
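The hunk below implements this migration as one long condition on `resource["attributes"]["fields"]`; restated as a standalone helper for readability (hypothetical helper name, same logic as the patch):

```python
def migrate_namespace_field(fields: dict) -> None:
    # Copy the legacy "Namespace" value into "kube_namespace" only when the
    # latter is empty, then clear the legacy field; an existing
    # kube_namespace value is never overwritten since its data is better.
    if (
        "Namespace" in fields
        and fields["Namespace"]["value"] is not None
        and "kube_namespace" in fields
        and fields["kube_namespace"]["value"] is None
    ):
        fields["kube_namespace"]["value"] = fields["Namespace"]["value"]
        fields["Namespace"]["value"] = None
```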
--- datadog_sync/model/incidents.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/datadog_sync/model/incidents.py b/datadog_sync/model/incidents.py index f9ecd04c..19b04a76 100644 --- a/datadog_sync/model/incidents.py +++ b/datadog_sync/model/incidents.py @@ -80,6 +80,11 @@ def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = if "visibility" in resource["attributes"] and resource["attributes"]["visibility"] is None: resource["attributes"]["visibility"] = "organization" + # let's do some deepomatic-specific incidents fields migrations: + if "Namespace" in resource["attributes"]["fields"] and resource["attributes"]["fields"]["Namespace"]["value"] is not None and "kube_namespace" in resource["attributes"]["fields"] and resource["attributes"]["fields"]["kube_namespace"]["value"] is None: + resource["attributes"]["fields"]["kube_namespace"]["value"] = resource["attributes"]["fields"]["Namespace"]["value"] + resource["attributes"]["fields"]["Namespace"]["value"] = None + self.resource_config.source_resources[resource["id"]] = resource def pre_resource_action_hook(self, _id, resource: Dict) -> None: From 395916d93547d868cef92294b0f51fe43a6436c1 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 4 Oct 2023 17:34:48 +0200 Subject: [PATCH 14/23] Fix crash in remove_excluded_attr() and remove_non_nullable_attributes() when last level is an array of non-nested objects --- datadog_sync/utils/resource_utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/datadog_sync/utils/resource_utils.py b/datadog_sync/utils/resource_utils.py index 6339ed63..03aa999a 100644 --- a/datadog_sync/utils/resource_utils.py +++ b/datadog_sync/utils/resource_utils.py @@ -83,6 +83,10 @@ def remove_non_nullable_attributes(resource_config, resource): def del_attr(k_list, resource): + if isinstance(resource, list): + for r in resource: + del_attr(k_list, r) + return if len(k_list) == 1: resource.pop(k_list[0], None) else: @@ -92,6 +96,10 @@ def del_attr(k_list, resource): def del_null_attr(k_list, resource): + if isinstance(resource, list): + for r in resource: + del_null_attr(k_list, r) + return if len(k_list) == 1 and k_list[0] in resource and resource[k_list[0]] is None: resource.pop(k_list[0], None) elif len(k_list) > 1 and resource[k_list[0]] is not None: From 6e922d74b23d2c3218a607bbca714c45c5d94431 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 19:44:13 +0200 Subject: [PATCH 15/23] additional resource: incident_org_settings - undocumented api, but standard v2 api used by web frontend, works with API/APP key - just one resource per org, forcing update, ignoring ids, etc. --- README.md | 4 ++ datadog_sync/model/incident_org_settings.py | 63 +++++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 68 insertions(+) create mode 100644 datadog_sync/model/incident_org_settings.py diff --git a/README.md b/README.md index 8ff64d3b..f7c0b251 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,10 @@ The import is lossy: for example the creation date is on sync, timeline is lost, 'notifications' explicitly not-sync'ed to avoid spamming people during import (although later tests seem to conclude 'inactive' user (invitation pending: sync'ed users, but they never connected to the destination region) are *not* notified) +### incident_org_settings +- undocumented api, but standard v2 api used by web frontend, works with API/APP key +- just one resource per org, forcing update, ignoring ids, etc. 
+ # datadog-sync-cli Datadog cli tool to sync resources across organizations. diff --git a/datadog_sync/model/incident_org_settings.py b/datadog_sync/model/incident_org_settings.py new file mode 100644 index 00000000..be5f2b9c --- /dev/null +++ b/datadog_sync/model/incident_org_settings.py @@ -0,0 +1,63 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc. + +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class IncidentOrgSettings(BaseResource): + resource_type = "incident_org_settings" + resource_config = ResourceConfig( + base_path="/api/v2/incidents/config/org/settings", + excluded_attributes=[ + "id", + "attributes.modified", + ] + ) + # Additional Incidents specific attributes + + def get_resources(self, client: CustomClient) -> List[Dict]: + resp = client.get(self.resource_config.base_path).json()["data"] + return [ resp ] + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if _id: + # there is only one settings, ignoring id + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path).json()["data"] + + resource = cast(dict, resource) + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, _id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + pass + + def create_resource(self, _id: str, resource: Dict) -> None: + # the settings is always there, just update + self.update_resource(_id, resource) + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + resp = destination_client.patch( + self.resource_config.base_path, + payload, + ).json()["data"] + + self.resource_config.destination_resources[_id] = resp + + def delete_resource(self, _id: str) -> None: + raise Exception("deleting incident_org_settings is not supported") + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + pass diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index 8f85f0c3..42d004e2 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -27,3 +27,4 @@ from datadog_sync.model.logs_facets import LogsFacets from datadog_sync.model.logs_views import LogsViews from datadog_sync.model.incidents import Incidents +from datadog_sync.model.incident_org_settings import IncidentOrgSettings From 6f8723c291e1e60d23d26a6a8b43f04c2312b7fd Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 20:29:38 +0200 Subject: [PATCH 16/23] additional resource: incidents_config_fields - unique by attributes.names - perpetual diff: on 'metadata' for ootb service & team: - PATCH ok (maybe ignores metadata?) 
- but PATCH response contains `metadata: null` => `diffs` always shows it; it's ok, we can ignore those --- README.md | 6 + datadog_sync/model/incidents_config_fields.py | 113 ++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 120 insertions(+) create mode 100644 datadog_sync/model/incidents_config_fields.py diff --git a/README.md b/README.md index f7c0b251..cf2718f6 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,12 @@ The import is lossy: for example the creation date is on sync, timeline is lost, - undocumented api, but standard v2 api used by web frontend, works with API/APP key - just one resource per org, forcing update, ignoring ids, etc. +### incidents_config_fields +- perpetual diff: on 'metadata' for ootb service & team: + - PATCH ok (maybe ignores metadata?) + - but PATCH response contains `metadata: null` + => `diffs` always shows it; it's ok, we can ignore those + # datadog-sync-cli Datadog cli tool to sync resources across organizations. diff --git a/datadog_sync/model/incidents_config_fields.py b/datadog_sync/model/incidents_config_fields.py new file mode 100644 index 00000000..c626c8b0 --- /dev/null +++ b/datadog_sync/model/incidents_config_fields.py @@ -0,0 +1,113 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc. + +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig +from datadog_sync.utils.custom_client import PaginationConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class IncidentsConfigFields(BaseResource): + resource_type = "incidents_config_fields" + resource_config = ResourceConfig( + base_path="/api/v2/incidents/config/fields", + excluded_attributes=[ + "attributes.created_by", + "attributes.created_by_uuid", + "attributes.last_modified_by", + "attributes.last_modified_by_uuid", + "attributes.created", + "attributes.modified", + "relationships.created_by_user", + "relationships.last_modified_by_user", + "id", + ] + ) + # Additional Incidents specific attributes + pagination_config = PaginationConfig( + page_size=1000, + page_number_param="page[offset]", + page_size_param="page[limit]", + # this endpoint uses offset (number of items) instead of page number, workaround the paginated client by reusing `page_number` to store offset instead (computed here because we don't have `resp`) + page_number_func=lambda idx, page_size, page_number: page_size * (idx + 1), + # just return 1, the pagination loop already handles breaking when a page is smaller than page size + remaining_func=lambda *args: 1, + ) + # key: (unique) attributes.name + destination_incidents_config_fields: Dict[str, Dict] = dict() + + def get_resources(self, client: CustomClient) -> List[Dict]: + resp = client.paginated_request(client.get)( + self.resource_config.base_path, + pagination_config=self.pagination_config + ) + + return resp + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if _id: + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path + f"/{_id}").json()["data"] + + resource = cast(dict, resource) + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, 
_id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + self.destination_incidents_config_fields = self.get_destination_incidents_config_fields() + + def create_resource(self, _id: str, resource: Dict) -> None: + # names are unique: patching existing ones instead of create + name = resource["attributes"]["name"] + if name in self.destination_incidents_config_fields: + self.resource_config.destination_resources[_id] = self.destination_incidents_config_fields[name] + self.update_resource(_id, resource) + return + + destination_client = self.config.destination_client + payload = {"data": resource} + resp = destination_client.post( + self.resource_config.base_path, + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + resp = destination_client.patch( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}", + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + def delete_resource(self, _id: str) -> None: + destination_client = self.config.destination_client + destination_client.delete( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}" + ) + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + pass + + def get_destination_incidents_config_fields(self) -> Dict[str, Dict]: + destination_incidents_config_fields = {} + destination_client = self.config.destination_client + + resp = self.get_resources(destination_client) + for log_facet in resp: + destination_incidents_config_fields[log_facet["attributes"]["name"]] = log_facet + + return destination_incidents_config_fields diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index 42d004e2..54449abf 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -28,3 +28,4 @@ from datadog_sync.model.logs_views import LogsViews from datadog_sync.model.incidents import Incidents from datadog_sync.model.incident_org_settings import IncidentOrgSettings +from datadog_sync.model.incidents_config_fields import IncidentsConfigFields From f753f568a3762e1f57e23e780d6aa24285f8496b Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 21:01:44 +0200 Subject: [PATCH 17/23] additional resource: incidents_config_notifications_templates --- README.md | 2 + ...ncidents_config_notifications_templates.py | 76 +++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 79 insertions(+) create mode 100644 datadog_sync/model/incidents_config_notifications_templates.py diff --git a/README.md b/README.md index cf2718f6..11c804eb 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,8 @@ The import is lossy: for example the creation date is on sync, timeline is lost, - but PATCH response contains `metadata: null` => `diffs` always shows it; it's ok, we can ignore those +### incidents_config_notifications_templates + # datadog-sync-cli Datadog cli tool to sync resources across organizations. 
diff --git a/datadog_sync/model/incidents_config_notifications_templates.py b/datadog_sync/model/incidents_config_notifications_templates.py new file mode 100644 index 00000000..642d6b2d --- /dev/null +++ b/datadog_sync/model/incidents_config_notifications_templates.py @@ -0,0 +1,76 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc. + +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class IncidentsConfigNotificationsTemplates(BaseResource): + resource_type = "incidents_config_notifications_templates" + resource_config = ResourceConfig( + base_path="/api/v2/incidents/config/notifications/templates", + excluded_attributes=[ + "attributes.created_by_uuid", + "attributes.last_modified_by_uuid", + "attributes.created", + "attributes.modified", + "id", + ] + ) + # Additional IncidentsConfigNotificationsTemplates specific attributes + + def get_resources(self, client: CustomClient) -> List[Dict]: + resp = client.get(self.resource_config.base_path).json() + return resp["data"] + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if _id: + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path + f"/{_id}").json()["data"] + + resource = cast(dict, resource) + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, _id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + pass + + def create_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + resp = destination_client.post( + self.resource_config.base_path, + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + resp = destination_client.patch( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}", + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + def delete_resource(self, _id: str) -> None: + destination_client = self.config.destination_client + destination_client.delete( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}" + ) + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + pass diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index 54449abf..7d0fec95 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -29,3 +29,4 @@ from datadog_sync.model.incidents import Incidents from datadog_sync.model.incident_org_settings import IncidentOrgSettings from datadog_sync.model.incidents_config_fields import IncidentsConfigFields +from datadog_sync.model.incidents_config_notifications_templates import IncidentsConfigNotificationsTemplates From c46370a91e3d0478b57e01465595a410a2cdab91 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Wed, 13 Sep 2023 21:47:23 +0200 Subject: [PATCH 18/23] 
additional resource: incidents_config_integrations_workflows Covers General>Integrations & Notifications>Rules - api inconsistency: `attributes.triggers.variables.severity_values` and `attributes.triggers.variables.status_values` are `null` in read calls, and require an array in write calls => skipping them with non_nullable_attr (fixed to support lists too) - errors (probably because some workflows are hardcoded, not duplicable, but there is no obvious attribute to distinguish them) - Error: 400 Bad Request - {"errors":["a workflow like that already exists"]} - Error: 400 Bad Request - {"errors":["Invalid payload: 'name' is invalid"]} => ignoring those errors for now; manually fixed `Send all incident updates to a global channel` via the web frontend. --- README.md | 8 ++ ...incidents_config_integrations_workflows.py | 81 +++++++++++++++++++ datadog_sync/models/__init__.py | 1 + 3 files changed, 90 insertions(+) create mode 100644 datadog_sync/model/incidents_config_integrations_workflows.py diff --git a/README.md b/README.md index 11c804eb..78796062 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,14 @@ The import is lossy: for example the creation date becomes the sync date, the timeline is lost, ### incidents_config_notifications_templates +### incidents_config_integrations_workflows +Covers General>Integrations & Notifications>Rules +- (api inconsistency: `attributes.triggers.variables.severity_values` and `attributes.triggers.variables.status_values` are `null` in read calls, and require an array in write calls) +- errors (probably because some workflows are hardcoded, not duplicable, but there is no obvious attribute to distinguish them) + - Error: 400 Bad Request - {"errors":["a workflow like that already exists"]} + - Error: 400 Bad Request - {"errors":["Invalid payload: 'name' is invalid"]} + => ignoring those errors for now; manually fixed `Send all incident updates to a global channel` via the web frontend. + # datadog-sync-cli Datadog cli tool to sync resources across organizations. diff --git a/datadog_sync/model/incidents_config_integrations_workflows.py b/datadog_sync/model/incidents_config_integrations_workflows.py new file mode 100644 index 00000000..5a26dfe2 --- /dev/null +++ b/datadog_sync/model/incidents_config_integrations_workflows.py @@ -0,0 +1,81 @@ +# Unless explicitly stated otherwise all files in this repository are licensed +# under the 3-clause BSD style license (see LICENSE). +# This product includes software developed at Datadog (https://www.datadoghq.com/). +# Copyright 2019 Datadog, Inc.
+ +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, List, Dict, cast + +from datadog_sync.utils.base_resource import BaseResource, ResourceConfig + +if TYPE_CHECKING: + from datadog_sync.utils.custom_client import CustomClient + + +class IncidentsConfigIntegrationsWorkflows(BaseResource): + resource_type = "incidents_config_integrations_workflows" + resource_config = ResourceConfig( + resource_connections={ + "incidents_config_notifications_templates": [ + "attributes.steps.variables.notification_template.id" + ] + }, + base_path="/api/v2/incidents/config/integrations/workflows", + excluded_attributes=[ + "id", + ], + non_nullable_attr=[ + "attributes.triggers.variables.severity_values", + "attributes.triggers.variables.status_values", + ], + ) + # Additional IncidentsConfigIntegrationsWorkflows specific attributes + + def get_resources(self, client: CustomClient) -> List[Dict]: + resp = client.get(self.resource_config.base_path).json() + return resp["data"] + + def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None: + if _id: + source_client = self.config.source_client + resource = source_client.get(self.resource_config.base_path + f"/{_id}").json()["data"] + + resource = cast(dict, resource) + self.resource_config.source_resources[resource["id"]] = resource + + def pre_resource_action_hook(self, _id, resource: Dict) -> None: + pass + + def pre_apply_hook(self) -> None: + pass + + def create_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + resp = destination_client.post( + self.resource_config.base_path, + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + def update_resource(self, _id: str, resource: Dict) -> None: + destination_client = self.config.destination_client + payload = {"data": resource} + resp = destination_client.patch( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}", + payload, + ).json() + + self.resource_config.destination_resources[_id] = resp["data"] + + def delete_resource(self, _id: str) -> None: + destination_client = self.config.destination_client + destination_client.delete( + self.resource_config.base_path + + f"/{self.resource_config.destination_resources[_id]['id']}" + ) + + def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]: + return super(IncidentsConfigIntegrationsWorkflows, self).connect_id(key, r_obj, resource_to_connect) diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py index 7d0fec95..6416b544 100644 --- a/datadog_sync/models/__init__.py +++ b/datadog_sync/models/__init__.py @@ -30,3 +30,4 @@ from datadog_sync.model.incident_org_settings import IncidentOrgSettings from datadog_sync.model.incidents_config_fields import IncidentsConfigFields from datadog_sync.model.incidents_config_notifications_templates import IncidentsConfigNotificationsTemplates +from datadog_sync.model.incidents_config_integrations_workflows import IncidentsConfigIntegrationsWorkflows From 4daf218851465a4712cd452a9c85a5a3b2a31307 Mon Sep 17 00:00:00 2001 From: Thomas Riccardi Date: Thu, 28 Sep 2023 15:47:31 +0200 Subject: [PATCH 19/23] additional resource: incidents_todos - iterate on all incidents, then for each incident, iterate on relationships 'todo' --- README.md | 3 + datadog_sync/model/incidents_todos.py | 114 ++++++++++++++++++++++++++ 
 datadog_sync/models/__init__.py       |   1 +
 3 files changed, 120 insertions(+)
 create mode 100644 datadog_sync/model/incidents_todos.py

diff --git a/README.md b/README.md
index 78796062..f777128b 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,9 @@ The import is lossy: for example the creation date is on sync, timeline is lost,
 'notifications' explicitly not-sync'ed to avoid spamming people during import
 (although later tests seem to conclude 'inactive' users (invitation pending:
 sync'ed users, but they never connected to the destination region) are *not*
 notified)
 
+### incidents_todos
+- creation date & author is lost, as usual
+
 ### incident_org_settings
 - undocumented api, but standard v2 api used by web frontend, works with API/APP key
 - just one resource per org, forcing update, ignoring ids, etc.
diff --git a/datadog_sync/model/incidents_todos.py b/datadog_sync/model/incidents_todos.py
new file mode 100644
index 00000000..76a36206
--- /dev/null
+++ b/datadog_sync/model/incidents_todos.py
@@ -0,0 +1,116 @@
+# Unless explicitly stated otherwise all files in this repository are licensed
+# under the 3-clause BSD style license (see LICENSE).
+# This product includes software developed at Datadog (https://www.datadoghq.com/).
+# Copyright 2019 Datadog, Inc.
+
+from __future__ import annotations
+from typing import TYPE_CHECKING, Optional, List, Dict, cast
+
+from datadog_sync.utils.base_resource import BaseResource, ResourceConfig
+from datadog_sync.utils.custom_client import PaginationConfig
+
+if TYPE_CHECKING:
+    from datadog_sync.utils.custom_client import CustomClient
+
+
+class IncidentsTodos(BaseResource):
+    resource_type = "incidents_todos"
+    resource_config = ResourceConfig(
+        resource_connections={
+            "incidents": [
+                "attributes.incident_id",
+            ]
+        },
+        base_path="/api/v2/incidents",
+        excluded_attributes=[
+            "id",
+            "attributes.last_modified_by",  # somehow returned by create or update, not by get
+            "attributes.last_modified_by_uuid",
+            "attributes.created",
+            "attributes.modified",
+            "attributes.created_by",  # somehow returned by create or update, not by get
+            "attributes.created_by_uuid",
+            "relationships.created_by_user",
+            "relationships.last_modified_by_user",
+        ]
+    )
+    # Additional IncidentsTodos specific attributes
+    pagination_config = PaginationConfig(
+        page_size=100,
+        page_number_param="page[offset]",
+        page_size_param="page[size]",
+        # this endpoint uses an offset (number of items) instead of a page number; work around the paginated client by reusing `page_number` to store the offset instead (computed here because we don't have `resp`)
+        page_number_func=lambda idx, page_size, page_number: page_size * (idx + 1),
+        # just return 1: the pagination loop already handles breaking when a page is smaller than page size
+        remaining_func=lambda *args: 1,
+    )
+    todos_path: str = "/api/v2/incidents/{incident_id}/relationships/todos"
+
+    def get_resources(self, client: CustomClient) -> List[Dict]:
+        # first get all incidents, then for each incident get all its todos
+        resp_incidents = client.paginated_request(client.get)(
+            self.resource_config.base_path,
+            pagination_config=self.pagination_config
+        )
+
+        resp = []
+        for incident in resp_incidents:
+            resp += client.paginated_request(client.get)(
+                # use public id, to avoid connecting the resource manually here (we are in get_resources, connection is not usually done there, so it's not free); this assumes the public IDs between source & destination are in sync, which should be the case if importing incidents via datadog-sync-cli, cf comments in that resource
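+                # pagination: page[offset] advances by page_size * (idx + 1) per request (zero-based idx), so with page_size=100 the fetch sequence is presumably 0 (the client's initial default), 100, 200, ... until a short page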
+                self.todos_path.format(incident_id=incident["attributes"]["public_id"]),
+                pagination_config=self.pagination_config
+            )
+        return resp
+
+    def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None:
+        if _id:
+            raise Exception("importing incidents_todos by id is not supported: we need not only the incidents_todos id (which we have) but also the parent incident id, which we do not have.")
+
+        resource = cast(dict, resource)
+        self.resource_config.source_resources[resource["id"]] = resource
+
+    def pre_resource_action_hook(self, _id, resource: Dict) -> None:
+        pass
+
+    def pre_apply_hook(self) -> None:
+        pass
+
+    def create_resource(self, _id: str, resource: Dict) -> None:
+        destination_client = self.config.destination_client
+        destination_incident_id = resource["attributes"].pop("incident_id")
+        payload = {"data": resource}
+
+        resp = destination_client.post(
+            # incidents api works both with public_id and id; here we use the connected (converted to the destination incident) uuid id
+            self.todos_path.format(incident_id=destination_incident_id),
+            payload,
+        ).json()
+
+        self.resource_config.destination_resources[_id] = resp["data"]
+
+    def update_resource(self, _id: str, resource: Dict) -> None:
+        destination_client = self.config.destination_client
+        destination_incident_id = resource["attributes"].pop("incident_id")
+        payload = {"data": resource}
+        resp = destination_client.patch(
+            # incidents api works both with public_id and id; here we use the connected (converted to the destination incident) uuid id
+            self.todos_path.format(incident_id=destination_incident_id)
+            + f"/{self.resource_config.destination_resources[_id]['id']}",
+            payload,
+        ).json()
+
+        self.resource_config.destination_resources[_id] = resp["data"]
+
+    def delete_resource(self, _id: str) -> None:
+        destination_client = self.config.destination_client
+        # `resource` is not in scope here: recover the incident id from the stored destination resource (this assumes the api returns `attributes.incident_id` on create/update, as it does on reads)
+        destination_incident_id = self.resource_config.destination_resources[_id]["attributes"]["incident_id"]
+        destination_client.delete(
+            # incidents api works both with public_id and id; here we use the destination incident uuid id
+            self.todos_path.format(incident_id=destination_incident_id)
+            + f"/{self.resource_config.destination_resources[_id]['id']}"
+        )
+
+    def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]:
+        return super(IncidentsTodos, self).connect_id(key, r_obj, resource_to_connect)
diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py
index 6416b544..c53933c6 100644
--- a/datadog_sync/models/__init__.py
+++ b/datadog_sync/models/__init__.py
@@ -27,6 +27,7 @@
 from datadog_sync.model.logs_facets import LogsFacets
 from datadog_sync.model.logs_views import LogsViews
 from datadog_sync.model.incidents import Incidents
+from datadog_sync.model.incidents_todos import IncidentsTodos
 from datadog_sync.model.incident_org_settings import IncidentOrgSettings
 from datadog_sync.model.incidents_config_fields import IncidentsConfigFields
 from datadog_sync.model.incidents_config_notifications_templates import IncidentsConfigNotificationsTemplates

From 8d8fd9c29b92fe2a82ae608d151f89ed04032498 Mon Sep 17 00:00:00 2001
From: Thomas Riccardi
Date: Wed, 4 Oct 2023 16:42:13 +0200
Subject: [PATCH 20/23] additional resource: incidents_integrations

api bug: https://help.datadoghq.com/hc/en-us/requests/1377270
---
 README.md                                    |   3 +
 datadog_sync/model/incidents_integrations.py | 118 ++++++++++++++++++++
 datadog_sync/models/__init__.py              |   1 +
 3 files changed, 122 insertions(+)
 create mode 100644 datadog_sync/model/incidents_integrations.py

diff --git a/README.md b/README.md
index f777128b..e16733ad 100644
--- a/README.md
+++ b/README.md
@@ -38,6 +38,9 @@ The import is lossy: for example the creation date is on sync, timeline is lost,
 'notifications' explicitly not-sync'ed to avoid spamming people during import
 (although later tests seem to conclude 'inactive' users (invitation pending:
 sync'ed users, but they never connected to the destination region) are *not*
 notified)
 
+### incidents_integrations
+- api bug: it url-escapes the `&` query-string separator in the slack `redirect_url` before saving; this leads to a forever diff: datadog-sync-cli tries to PATCH the correct value on each sync, and the server saves a wrong value.
+
 ### incidents_todos
 - creation date & author is lost, as usual
 
diff --git a/datadog_sync/model/incidents_integrations.py b/datadog_sync/model/incidents_integrations.py
new file mode 100644
index 00000000..19aa1587
--- /dev/null
+++ b/datadog_sync/model/incidents_integrations.py
@@ -0,0 +1,118 @@
+# Unless explicitly stated otherwise all files in this repository are licensed
+# under the 3-clause BSD style license (see LICENSE).
+# This product includes software developed at Datadog (https://www.datadoghq.com/).
+# Copyright 2019 Datadog, Inc.
+
+from __future__ import annotations
+from typing import TYPE_CHECKING, Optional, List, Dict, cast
+
+from datadog_sync.utils.base_resource import BaseResource, ResourceConfig
+from datadog_sync.utils.custom_client import PaginationConfig
+
+if TYPE_CHECKING:
+    from datadog_sync.utils.custom_client import CustomClient
+
+
+class IncidentsIntegrations(BaseResource):
+    resource_type = "incidents_integrations"
+    resource_config = ResourceConfig(
+        resource_connections={
+            "incidents": [
+                "attributes.incident_id",
+            ]
+        },
+        base_path="/api/v2/incidents",
+        excluded_attributes=[
+            "id",
+            "attributes.last_modified_by",  # somehow returned by create or update, not by get
+            "attributes.last_modified_by_uuid",
+            "attributes.created",
+            "attributes.modified",
+            "attributes.created_by",  # somehow returned by create or update, not by get
+            "attributes.created_by_uuid",
+            "relationships.created_by_user",
+            "relationships.last_modified_by_user",
+            "attributes.status",  # after create, it's always `4`: `indicates manually updated`
+            "attributes.metadata.channels.org_id",
+            "attributes.metadata.channels.incident_uuid",
+        ]
+    )
+    # Additional IncidentsIntegrations specific attributes
+    pagination_config = PaginationConfig(
+        page_size=100,
+        page_number_param="page[offset]",
+        page_size_param="page[size]",
+        # this endpoint uses an offset (number of items) instead of a page number; work around the paginated client by reusing `page_number` to store the offset instead (computed here because we don't have `resp`)
+        page_number_func=lambda idx, page_size, page_number: page_size * (idx + 1),
+        # just return 1: the pagination loop already handles breaking when a page is smaller than page size
+        remaining_func=lambda *args: 1,
+    )
+    integrations_path: str = "/api/v2/incidents/{incident_id}/relationships/integrations"
+
+    def get_resources(self, client: CustomClient) -> List[Dict]:
+        # first get all incidents, then for each incident get all its integrations
+        resp_incidents = client.paginated_request(client.get)(
+            self.resource_config.base_path,
+            pagination_config=self.pagination_config
+        )
+
+        resp = []
+        for incident in resp_incidents:
+            resp += client.paginated_request(client.get)(
+                # use public id, to avoid connecting the resource manually here (we are in get_resources, connection is not usually done there, so it's not free); this assumes the public IDs between source & destination are in sync, which should be the case if importing incidents via datadog-sync-cli, cf comments in that resource
+                self.integrations_path.format(incident_id=incident["attributes"]["public_id"]),
+                pagination_config=self.pagination_config
+            )
+        return resp
+
+    def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None:
+        if _id:
+            raise Exception("importing incidents_integrations by id is not supported: we need not only the incidents_integrations id (which we have) but also the parent incident id, which we do not have.")
+
+        resource = cast(dict, resource)
+        self.resource_config.source_resources[resource["id"]] = resource
+
+    def pre_resource_action_hook(self, _id, resource: Dict) -> None:
+        pass
+
+    def pre_apply_hook(self) -> None:
+        pass
+
+    def create_resource(self, _id: str, resource: Dict) -> None:
+        destination_client = self.config.destination_client
+        destination_incident_id = resource["attributes"].pop("incident_id")
+        payload = {"data": resource}
+
+        resp = destination_client.post(
+            # incidents api works both with public_id and id; here we use the connected (converted to the destination incident) uuid id
+            self.integrations_path.format(incident_id=destination_incident_id),
+            payload,
+        ).json()
+
+        self.resource_config.destination_resources[_id] = resp["data"]
+
+    def update_resource(self, _id: str, resource: Dict) -> None:
+        destination_client = self.config.destination_client
+        destination_incident_id = resource["attributes"].pop("incident_id")
+        payload = {"data": resource}
+        resp = destination_client.patch(
+            # incidents api works both with public_id and id; here we use the connected (converted to the destination incident) uuid id
+            self.integrations_path.format(incident_id=destination_incident_id)
+            + f"/{self.resource_config.destination_resources[_id]['id']}",
+            payload,
+        ).json()
+
+        self.resource_config.destination_resources[_id] = resp["data"]
+
+    def delete_resource(self, _id: str) -> None:
+        destination_client = self.config.destination_client
+        # `resource` is not in scope here: recover the incident id from the stored destination resource (this assumes the api returns `attributes.incident_id` on create/update, as it does on reads)
+        destination_incident_id = self.resource_config.destination_resources[_id]["attributes"]["incident_id"]
+        destination_client.delete(
+            # incidents api works both with public_id and id; here we use the destination incident uuid id
+            self.integrations_path.format(incident_id=destination_incident_id)
+            + f"/{self.resource_config.destination_resources[_id]['id']}"
+        )
+
+    def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]:
+        return super(IncidentsIntegrations, self).connect_id(key, r_obj, resource_to_connect)
diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py
index c53933c6..6c017429 100644
--- a/datadog_sync/models/__init__.py
+++ b/datadog_sync/models/__init__.py
@@ -27,6 +27,7 @@
 from datadog_sync.model.logs_facets import LogsFacets
 from datadog_sync.model.logs_views import LogsViews
 from datadog_sync.model.incidents import Incidents
+from datadog_sync.model.incidents_integrations import IncidentsIntegrations
 from datadog_sync.model.incidents_todos import IncidentsTodos
 from datadog_sync.model.incident_org_settings import IncidentOrgSettings
 from datadog_sync.model.incidents_config_fields import IncidentsConfigFields

From 2f48fa828994dea9912a001e1c7ceb9f312b2b5b Mon Sep 17 00:00:00 2001
From: Thomas Riccardi
Date: Thu, 5 Oct 2023 15:47:39 +0200
Subject: [PATCH 21/23] additional resource: integrations_slack_channels

- api doesn't support the `display.muting` option, cf https://help.datadoghq.com/hc/en-us/requests/1380152
---
 README.md                                 |  8 ++
 .../model/integrations_slack_channels.py  | 84 +++++++++++++++++++
 datadog_sync/models/__init__.py           |  1 +
 3 files changed, 93 insertions(+)
 create mode 100644 datadog_sync/model/integrations_slack_channels.py

diff --git a/README.md b/README.md
index e16733ad..41bf032b 100644
--- a/README.md
+++ b/README.md
@@ -64,6 +64,14 @@ Covers General>Integrations & Notifications>Rules
   - Error: 400 Bad Request - {"errors":["Invalid payload: 'name' is invalid"]}
   => ignoring those errors for now, and manually fixed `Send all incident updates to a global channel` via web frontend.
 
+### integrations_slack_channels
+how to use:
+- supports only *one* slack account
+- api doesn't support the `muting` option
+- manually create the slack integration in the destination organization, with the *same name* as in the source
+- edit the hardcoded `slack_account_name` in `datadog_sync/model/integrations_slack_channels.py` for your organizations
+- run import & diffs & sync as usual
+
 # datadog-sync-cli
 
 Datadog cli tool to sync resources across organizations.
diff --git a/datadog_sync/model/integrations_slack_channels.py b/datadog_sync/model/integrations_slack_channels.py
new file mode 100644
index 00000000..6d1bc776
--- /dev/null
+++ b/datadog_sync/model/integrations_slack_channels.py
@@ -0,0 +1,84 @@
+# Unless explicitly stated otherwise all files in this repository are licensed
+# under the 3-clause BSD style license (see LICENSE).
+# This product includes software developed at Datadog (https://www.datadoghq.com/).
+# Copyright 2019 Datadog, Inc.
+
+from __future__ import annotations
+from typing import TYPE_CHECKING, Optional, List, Dict, cast
+
+from datadog_sync.utils.base_resource import BaseResource, ResourceConfig
+
+if TYPE_CHECKING:
+    from datadog_sync.utils.custom_client import CustomClient
+
+
+class IntegrationsSlackChannels(BaseResource):
+    resource_type = "integrations_slack_channels"
+    resource_config = ResourceConfig(
+        base_path="/api/v1/integration/slack/configuration/accounts/{account_name}/channels",
+        excluded_attributes=[
+            "id",
+        ]
+    )
+    # Additional IntegrationsSlackChannels specific attributes
+    slack_account_name = "deepo"  # <-- to edit
+
+    def get_resources(self, client: CustomClient) -> List[Dict]:
+        resp = client.get(
+            self.resource_config.base_path.format(account_name=self.slack_account_name)
+        ).json()
+        # fabricate id == channel name (without the leading '#') as required by datadog_sync
+        return [{"id": r["name"].strip('#'), **r} for r in resp]
+
+    def import_resource(self, _id: Optional[str] = None, resource: Optional[Dict] = None) -> None:
+        if _id:
+            # the fabricated id is the channel name, usable directly in the path
+            source_client = self.config.source_client
+            resource = source_client.get(
+                self.resource_config.base_path.format(account_name=self.slack_account_name)
+                + f"/{_id}"
+            ).json()
+            # re-fabricate the id: the by-name GET returns the raw channel, without our fabricated id field
+            resource = {"id": _id, **resource}
+
+        resource = cast(dict, resource)
+        self.resource_config.source_resources[resource["id"]] = resource
+
+    def pre_resource_action_hook(self, _id, resource: Dict) -> None:
+        pass
+
+    def pre_apply_hook(self) -> None:
+        pass
+
+    def create_resource(self, _id: str, resource: Dict) -> None:
+        destination_client = self.config.destination_client
+        payload = resource
+        resp = destination_client.post(
+            self.resource_config.base_path.format(account_name=self.slack_account_name),
+            payload,
+        ).json()
+
+        self.resource_config.destination_resources[_id] = resp
+
+    def update_resource(self, _id: str, resource: Dict) -> None:
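+        # note: update re-POSTs the full channel payload to the channel path; the channel name (our fabricated id) is assumed identical in source & destination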
+        destination_client = self.config.destination_client
+        payload = resource
+        resp = destination_client.post(
+            # same id in source & destination: the channel name
+            self.resource_config.base_path.format(account_name=self.slack_account_name)
+            + f"/{_id}",
+            payload,
+        ).json()
+
+        self.resource_config.destination_resources[_id] = resp
+
+    def delete_resource(self, _id: str) -> None:
+        destination_client = self.config.destination_client
+        destination_client.delete(
+            # same id in source & destination: the channel name
+            self.resource_config.base_path.format(account_name=self.slack_account_name)
+            + f"/{_id}"
+        )
+
+    def connect_id(self, key: str, r_obj: Dict, resource_to_connect: str) -> Optional[List[str]]:
+        pass
diff --git a/datadog_sync/models/__init__.py b/datadog_sync/models/__init__.py
index 6c017429..8135e9e4 100644
--- a/datadog_sync/models/__init__.py
+++ b/datadog_sync/models/__init__.py
@@ -33,3 +33,4 @@
 from datadog_sync.model.incidents_config_fields import IncidentsConfigFields
 from datadog_sync.model.incidents_config_notifications_templates import IncidentsConfigNotificationsTemplates
 from datadog_sync.model.incidents_config_integrations_workflows import IncidentsConfigIntegrationsWorkflows
+from datadog_sync.model.integrations_slack_channels import IntegrationsSlackChannels

From 1d25d5d576007874d7b55099ca4b79eb3f7683ca Mon Sep 17 00:00:00 2001
From: Thomas Riccardi
Date: Mon, 9 Oct 2023 16:39:23 +0200
Subject: [PATCH 22/23] README.md: distinguish deepomatic fork from upstream

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 1e1c2e03..41b27ca1 100644
--- a/README.md
+++ b/README.md
@@ -72,6 +72,8 @@ how to use:
 - edit the hardcoded `slack_account_name` in `datadog_sync/model/integrations_slack_channels.py` for your organizations
 - run import & diffs & sync as usual
 
+---
+
 # datadog-sync-cli
 
 Datadog cli tool to sync resources across organizations.

From 7acb450aade3f386d2e7d68ccde3888748d1d6b0 Mon Sep 17 00:00:00 2001
From: Thomas Riccardi
Date: Wed, 19 Feb 2025 20:13:42 +0100
Subject: [PATCH 23/23] Add .dockerignore

---
 .dockerignore | 2 ++
 1 file changed, 2 insertions(+)
 create mode 100644 .dockerignore

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..8653b6fc
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+**/__pycache__/
+**/.mypy_cache/
\ No newline at end of file