From c3e8e6a82c9e6d74fff6dac3ec70519cdddb8d88 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 16:21:05 +0530 Subject: [PATCH 01/79] Configure Argo CD bootstrap for helm-argocd --- argocd/README.md | 55 +++++++++++++++ argocd/applicationsets/00-mongodb.yaml | 58 +++++++++++++++ argocd/applicationsets/01-clickhouse.yaml | 62 ++++++++++++++++ argocd/applicationsets/02-kafka.yaml | 74 ++++++++++++++++++++ argocd/applicationsets/03-countly.yaml | 62 ++++++++++++++++ argocd/applicationsets/04-observability.yaml | 55 +++++++++++++++ argocd/projects/customers.yaml | 34 +++++++++ argocd/root-application.yaml | 22 ++++++ environments/helm-argocd/clickhouse.yaml | 5 ++ environments/helm-argocd/countly.yaml | 7 ++ environments/helm-argocd/global.yaml | 26 +++++++ environments/helm-argocd/kafka.yaml | 7 ++ environments/helm-argocd/mongodb.yaml | 5 ++ environments/helm-argocd/observability.yaml | 2 + 14 files changed, 474 insertions(+) create mode 100644 argocd/README.md create mode 100644 argocd/applicationsets/00-mongodb.yaml create mode 100644 argocd/applicationsets/01-clickhouse.yaml create mode 100644 argocd/applicationsets/02-kafka.yaml create mode 100644 argocd/applicationsets/03-countly.yaml create mode 100644 argocd/applicationsets/04-observability.yaml create mode 100644 argocd/projects/customers.yaml create mode 100644 argocd/root-application.yaml create mode 100644 environments/helm-argocd/clickhouse.yaml create mode 100644 environments/helm-argocd/countly.yaml create mode 100644 environments/helm-argocd/global.yaml create mode 100644 environments/helm-argocd/kafka.yaml create mode 100644 environments/helm-argocd/mongodb.yaml create mode 100644 environments/helm-argocd/observability.yaml diff --git a/argocd/README.md b/argocd/README.md new file mode 100644 index 0000000..a56747b --- /dev/null +++ b/argocd/README.md @@ -0,0 +1,55 @@ +# ArgoCD Bootstrap For Customer Deployments + +This folder bootstraps Countly for multiple customers using ArgoCD 
`ApplicationSet`. + +## What This Layout Does + +- `projects/` creates one ArgoCD `AppProject` per customer. +- `applicationsets/` generates one ArgoCD `Application` per component per customer. +- `environments//` stores the Helm values used by those Applications. +- `root-application.yaml` creates one parent ArgoCD Application that syncs this whole `argocd/` folder. + +For the initial rollout, ArgoCD is scoped to: + +- `helm-argocd` + +## Before You Sync + +1. Install ArgoCD and the ApplicationSet controller. +2. Register each target cluster in ArgoCD. +3. Update the cluster API servers in: + - `argocd/projects/customers.yaml` + - `argocd/applicationsets/*.yaml` + - The `server:` value must match the cluster entry registered in ArgoCD. +4. Replace the environment hostname in: + - `environments/helm-argocd/global.yaml` +5. Populate the direct values in the customer `secrets-*.yaml` files before the first deploy. +6. Configure ArgoCD custom health checks for MongoDB, ClickHouse, and Strimzi CRs. + +## Apply Order + +```bash +kubectl apply -f argocd/projects/customers.yaml -n argocd +kubectl apply -f argocd/applicationsets/ -n argocd +``` + +Or bootstrap everything with one parent app: + +```bash +kubectl apply -f argocd/root-application.yaml -n argocd +``` + +## Generated Application Order + +- Wave `0`: MongoDB, ClickHouse +- Wave `5`: Kafka +- Wave `10`: Countly +- Wave `15`: Observability + +## Add A New Customer Later + +1. Copy `environments/reference` to `environments/<customer>/`. +2. Add the customer entry to each `ApplicationSet` list. +3. Add a matching `AppProject` to `argocd/projects/customers.yaml`. +4. Add that customer's Google Secret Manager keys. +5. Commit and let ArgoCD reconcile. 
diff --git a/argocd/applicationsets/00-mongodb.yaml b/argocd/applicationsets/00-mongodb.yaml new file mode 100644 index 0000000..657246b --- /dev/null +++ b/argocd/applicationsets/00-mongodb.yaml @@ -0,0 +1,58 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: countly-mongodb + namespace: argocd +spec: + generators: + - list: + elements: + - customer: helm-argocd + project: helm-argocd + server: https://34.135.156.216 + sizing: production + security: open + template: + metadata: + name: "{{customer}}-mongodb" + annotations: + argocd.argoproj.io/sync-wave: "0" + spec: + project: "{{project}}" + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: charts/countly-mongodb + helm: + releaseName: countly-mongodb + valueFiles: + - "../../environments/{{customer}}/global.yaml" + - "../../profiles/sizing/{{sizing}}/mongodb.yaml" + - "../../profiles/security/{{security}}/mongodb.yaml" + - "../../environments/{{customer}}/mongodb.yaml" + - "../../environments/{{customer}}/secrets-mongodb.yaml" + parameters: + - name: argocd.enabled + value: "true" + destination: + server: "{{server}}" + namespace: mongodb + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + - RespectIgnoreDifferences=true + retry: + limit: 5 + backoff: + duration: 5s + factor: 2 + maxDuration: 3m + ignoreDifferences: + - group: mongodbcommunity.mongodb.com + kind: MongoDBCommunity + jsonPointers: + - /status diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml new file mode 100644 index 0000000..637416a --- /dev/null +++ b/argocd/applicationsets/01-clickhouse.yaml @@ -0,0 +1,62 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: countly-clickhouse + namespace: argocd +spec: + generators: + - list: + elements: + - customer: helm-argocd + project: helm-argocd + server: https://34.135.156.216 + sizing: 
production + security: open + template: + metadata: + name: "{{customer}}-clickhouse" + annotations: + argocd.argoproj.io/sync-wave: "0" + spec: + project: "{{project}}" + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: charts/countly-clickhouse + helm: + releaseName: countly-clickhouse + valueFiles: + - "../../environments/{{customer}}/global.yaml" + - "../../profiles/sizing/{{sizing}}/clickhouse.yaml" + - "../../profiles/security/{{security}}/clickhouse.yaml" + - "../../environments/{{customer}}/clickhouse.yaml" + - "../../environments/{{customer}}/secrets-clickhouse.yaml" + parameters: + - name: argocd.enabled + value: "true" + destination: + server: "{{server}}" + namespace: clickhouse + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + - RespectIgnoreDifferences=true + retry: + limit: 5 + backoff: + duration: 5s + factor: 2 + maxDuration: 3m + ignoreDifferences: + - group: clickhouse.com + kind: ClickHouseCluster + jsonPointers: + - /status + - group: clickhouse.com + kind: KeeperCluster + jsonPointers: + - /status diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml new file mode 100644 index 0000000..13a7baa --- /dev/null +++ b/argocd/applicationsets/02-kafka.yaml @@ -0,0 +1,74 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: countly-kafka + namespace: argocd +spec: + generators: + - list: + elements: + - customer: helm-argocd + project: helm-argocd + server: https://34.135.156.216 + sizing: production + security: open + observability: full + kafkaConnect: balanced + template: + metadata: + name: "{{customer}}-kafka" + annotations: + argocd.argoproj.io/sync-wave: "5" + spec: + project: "{{project}}" + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: charts/countly-kafka + helm: + releaseName: countly-kafka + valueFiles: + - 
"../../environments/{{customer}}/global.yaml" + - "../../profiles/sizing/{{sizing}}/kafka.yaml" + - "../../profiles/kafka-connect/{{kafkaConnect}}/kafka.yaml" + - "../../profiles/observability/{{observability}}/kafka.yaml" + - "../../profiles/security/{{security}}/kafka.yaml" + - "../../environments/{{customer}}/kafka.yaml" + - "../../environments/{{customer}}/secrets-kafka.yaml" + parameters: + - name: argocd.enabled + value: "true" + destination: + server: "{{server}}" + namespace: kafka + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + - RespectIgnoreDifferences=true + retry: + limit: 5 + backoff: + duration: 5s + factor: 2 + maxDuration: 3m + ignoreDifferences: + - group: kafka.strimzi.io + kind: Kafka + jsonPointers: + - /status + - group: kafka.strimzi.io + kind: KafkaConnect + jsonPointers: + - /status + - group: kafka.strimzi.io + kind: KafkaConnector + jsonPointers: + - /status + - group: kafka.strimzi.io + kind: KafkaNodePool + jsonPointers: + - /status diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml new file mode 100644 index 0000000..63c5a71 --- /dev/null +++ b/argocd/applicationsets/03-countly.yaml @@ -0,0 +1,62 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: countly-app + namespace: argocd +spec: + generators: + - list: + elements: + - customer: helm-argocd + project: helm-argocd + server: https://34.135.156.216 + sizing: production + security: open + tls: letsencrypt + observability: full + template: + metadata: + name: "{{customer}}-countly" + annotations: + argocd.argoproj.io/sync-wave: "10" + spec: + project: "{{project}}" + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: charts/countly + helm: + releaseName: countly + valueFiles: + - "../../environments/{{customer}}/global.yaml" + - "../../profiles/sizing/{{sizing}}/countly.yaml" + - 
"../../profiles/tls/{{tls}}/countly.yaml" + - "../../profiles/observability/{{observability}}/countly.yaml" + - "../../profiles/security/{{security}}/countly.yaml" + - "../../environments/{{customer}}/countly.yaml" + - "../../environments/{{customer}}/secrets-countly.yaml" + parameters: + - name: argocd.enabled + value: "true" + destination: + server: "{{server}}" + namespace: countly + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + - RespectIgnoreDifferences=true + retry: + limit: 5 + backoff: + duration: 5s + factor: 2 + maxDuration: 3m + ignoreDifferences: + - group: networking.k8s.io + kind: Ingress + jsonPointers: + - /status diff --git a/argocd/applicationsets/04-observability.yaml b/argocd/applicationsets/04-observability.yaml new file mode 100644 index 0000000..3552800 --- /dev/null +++ b/argocd/applicationsets/04-observability.yaml @@ -0,0 +1,55 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: countly-observability + namespace: argocd +spec: + generators: + - list: + elements: + - customer: helm-argocd + project: helm-argocd + server: https://34.135.156.216 + sizing: production + security: open + observability: full + template: + metadata: + name: "{{customer}}-observability" + annotations: + argocd.argoproj.io/sync-wave: "15" + spec: + project: "{{project}}" + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: charts/countly-observability + helm: + releaseName: countly-observability + valueFiles: + - "../../environments/{{customer}}/global.yaml" + - "../../profiles/sizing/{{sizing}}/observability.yaml" + - "../../profiles/observability/{{observability}}/observability.yaml" + - "../../profiles/security/{{security}}/observability.yaml" + - "../../environments/{{customer}}/observability.yaml" + - "../../environments/{{customer}}/secrets-observability.yaml" + parameters: + - name: argocd.enabled + value: "true" 
+ destination: + server: "{{server}}" + namespace: observability + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + - RespectIgnoreDifferences=true + retry: + limit: 5 + backoff: + duration: 5s + factor: 2 + maxDuration: 3m diff --git a/argocd/projects/customers.yaml b/argocd/projects/customers.yaml new file mode 100644 index 0000000..bba46a8 --- /dev/null +++ b/argocd/projects/customers.yaml @@ -0,0 +1,34 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: helm-argocd + namespace: argocd +spec: + description: "Countly deployment for helm-argocd test environment" + sourceRepos: + - '*' + destinations: + - server: https://34.135.156.216 + namespace: mongodb + - server: https://34.135.156.216 + namespace: clickhouse + - server: https://34.135.156.216 + namespace: kafka + - server: https://34.135.156.216 + namespace: countly + - server: https://34.135.156.216 + namespace: observability + - server: https://34.135.156.216 + namespace: countly-migration + clusterResourceWhitelist: + - group: storage.k8s.io + kind: StorageClass + - group: rbac.authorization.k8s.io + kind: ClusterRole + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + - group: cert-manager.io + kind: ClusterIssuer + namespaceResourceWhitelist: + - group: '*' + kind: '*' diff --git a/argocd/root-application.yaml b/argocd/root-application.yaml new file mode 100644 index 0000000..e0f4f23 --- /dev/null +++ b/argocd/root-application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: countly-bootstrap + namespace: argocd +spec: + project: default + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: argocd + directory: + recurse: true + destination: + server: https://kubernetes.default.svc + namespace: argocd + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - ServerSideApply=true diff --git 
a/environments/helm-argocd/clickhouse.yaml b/environments/helm-argocd/clickhouse.yaml new file mode 100644 index 0000000..5dead21 --- /dev/null +++ b/environments/helm-argocd/clickhouse.yaml @@ -0,0 +1,5 @@ +# ClickHouse overrides for helm-argocd +# +# Runtime sizing comes from: +# - profiles/sizing/production/clickhouse.yaml +# - profiles/security/open/clickhouse.yaml diff --git a/environments/helm-argocd/countly.yaml b/environments/helm-argocd/countly.yaml new file mode 100644 index 0000000..508c17a --- /dev/null +++ b/environments/helm-argocd/countly.yaml @@ -0,0 +1,7 @@ +# Countly overrides for helm-argocd +# +# Most runtime sizing comes from: +# - profiles/sizing/production/countly.yaml +# - profiles/tls/letsencrypt/countly.yaml +# - profiles/observability/full/countly.yaml +# - profiles/security/open/countly.yaml diff --git a/environments/helm-argocd/global.yaml b/environments/helm-argocd/global.yaml new file mode 100644 index 0000000..e6566f3 --- /dev/null +++ b/environments/helm-argocd/global.yaml @@ -0,0 +1,26 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: production + observability: full + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + storageClass: "" + imagePullSecrets: [] + +ingress: + hostname: helm-argocd.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/helm-argocd/kafka.yaml b/environments/helm-argocd/kafka.yaml new file mode 100644 index 0000000..02e5309 --- /dev/null +++ b/environments/helm-argocd/kafka.yaml @@ -0,0 +1,7 @@ +# Kafka overrides for helm-argocd +# +# Runtime sizing comes from: +# - profiles/sizing/production/kafka.yaml +# - profiles/kafka-connect/balanced/kafka.yaml +# - profiles/observability/full/kafka.yaml +# 
- profiles/security/open/kafka.yaml diff --git a/environments/helm-argocd/mongodb.yaml b/environments/helm-argocd/mongodb.yaml new file mode 100644 index 0000000..56dbbfb --- /dev/null +++ b/environments/helm-argocd/mongodb.yaml @@ -0,0 +1,5 @@ +# MongoDB overrides for helm-argocd +# +# Runtime sizing comes from: +# - profiles/sizing/production/mongodb.yaml +# - profiles/security/open/mongodb.yaml diff --git a/environments/helm-argocd/observability.yaml b/environments/helm-argocd/observability.yaml new file mode 100644 index 0000000..41ce526 --- /dev/null +++ b/environments/helm-argocd/observability.yaml @@ -0,0 +1,2 @@ +mode: full +clusterName: helm-argocd From 0de4a093da387ce9de2bdf7e1409bb7924b11efd Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 16:38:18 +0530 Subject: [PATCH 02/79] Add helm-argocd secret value files for testing --- environments/helm-argocd/secrets-clickhouse.yaml | 6 ++++++ environments/helm-argocd/secrets-countly.yaml | 16 ++++++++++++++++ environments/helm-argocd/secrets-kafka.yaml | 7 +++++++ environments/helm-argocd/secrets-mongodb.yaml | 13 +++++++++++++ .../helm-argocd/secrets-observability.yaml | 1 + 5 files changed, 43 insertions(+) create mode 100644 environments/helm-argocd/secrets-clickhouse.yaml create mode 100644 environments/helm-argocd/secrets-countly.yaml create mode 100644 environments/helm-argocd/secrets-kafka.yaml create mode 100644 environments/helm-argocd/secrets-mongodb.yaml create mode 100644 environments/helm-argocd/secrets-observability.yaml diff --git a/environments/helm-argocd/secrets-clickhouse.yaml b/environments/helm-argocd/secrets-clickhouse.yaml new file mode 100644 index 0000000..f08f949 --- /dev/null +++ b/environments/helm-argocd/secrets-clickhouse.yaml @@ -0,0 +1,6 @@ +# ClickHouse secrets — fill these in before first deploy. 
+auth: + defaultUserPassword: + password: "default123" + externalSecret: + enabled: false diff --git a/environments/helm-argocd/secrets-countly.yaml b/environments/helm-argocd/secrets-countly.yaml new file mode 100644 index 0000000..7912b81 --- /dev/null +++ b/environments/helm-argocd/secrets-countly.yaml @@ -0,0 +1,16 @@ +# Countly secrets — fill these in before first deploy. +# This file is intentionally using direct Helm values for initial setup. +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + #passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + username: "default" + password: "default123" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + mongodb: + password: "mongo123" diff --git a/environments/helm-argocd/secrets-kafka.yaml b/environments/helm-argocd/secrets-kafka.yaml new file mode 100644 index 0000000..efb2f9d --- /dev/null +++ b/environments/helm-argocd/secrets-kafka.yaml @@ -0,0 +1,7 @@ +# Kafka secrets — fill these in before first deploy. +kafkaConnect: + clickhouse: + username: "default" + password: "default123" + externalSecret: + enabled: false diff --git a/environments/helm-argocd/secrets-mongodb.yaml b/environments/helm-argocd/secrets-mongodb.yaml new file mode 100644 index 0000000..f96763f --- /dev/null +++ b/environments/helm-argocd/secrets-mongodb.yaml @@ -0,0 +1,13 @@ +# MongoDB secrets — fill these in before first deploy. +users: + app: + password: "mongo123" + createSecret: true + externalSecret: + enabled: false + metrics: + enabled: true + password: "mongo-metrics123" + createSecret: true + externalSecret: + enabled: false diff --git a/environments/helm-argocd/secrets-observability.yaml b/environments/helm-argocd/secrets-observability.yaml new file mode 100644 index 0000000..a1c2697 --- /dev/null +++ b/environments/helm-argocd/secrets-observability.yaml @@ -0,0 +1 @@ +# Observability secrets — none required for bundled mode by default. 
From 3b9637413ee552bb7d951b91984137fb3cb2b038 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 16:49:04 +0530 Subject: [PATCH 03/79] Allow Namespace in helm-argocd AppProject --- argocd/projects/customers.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/argocd/projects/customers.yaml b/argocd/projects/customers.yaml index bba46a8..5f1ef07 100644 --- a/argocd/projects/customers.yaml +++ b/argocd/projects/customers.yaml @@ -21,6 +21,8 @@ spec: - server: https://34.135.156.216 namespace: countly-migration clusterResourceWhitelist: + - group: "" + kind: Namespace - group: storage.k8s.io kind: StorageClass - group: rbac.authorization.k8s.io From d589d0c91f1fece1a75d96740cf6df29fd1a737e Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 16:53:25 +0530 Subject: [PATCH 04/79] Set helm-argocd Countly secret values --- environments/helm-argocd/secrets-countly.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/environments/helm-argocd/secrets-countly.yaml b/environments/helm-argocd/secrets-countly.yaml index 7912b81..ee72d2f 100644 --- a/environments/helm-argocd/secrets-countly.yaml +++ b/environments/helm-argocd/secrets-countly.yaml @@ -3,9 +3,9 @@ secrets: mode: values common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - #passwordSecret: "CHANGEME-min-8-chars" + encryptionReportsKey: "helm-argocd-reports-key-2026" + webSessionSecret: "helm-argocd-web-session-2026" + passwordSecret: "helm-argocd-password-secret-2026" clickhouse: username: "default" password: "default123" From a2a616a091cf8df612a6df11ebae13caaa6c3546 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 17:06:58 +0530 Subject: [PATCH 05/79] Add Argo CD operator bootstrap apps --- argocd/README.md | 3 ++ argocd/operators/00-cert-manager.yaml | 29 +++++++++++++++++++ argocd/operators/01-mongodb-crds.yaml | 26 +++++++++++++++++ argocd/operators/02-mongodb-operator.yaml | 30 ++++++++++++++++++++ 
argocd/operators/03-clickhouse-operator.yaml | 29 +++++++++++++++++++ argocd/operators/04-strimzi-operator.yaml | 26 +++++++++++++++++ 6 files changed, 143 insertions(+) create mode 100644 argocd/operators/00-cert-manager.yaml create mode 100644 argocd/operators/01-mongodb-crds.yaml create mode 100644 argocd/operators/02-mongodb-operator.yaml create mode 100644 argocd/operators/03-clickhouse-operator.yaml create mode 100644 argocd/operators/04-strimzi-operator.yaml diff --git a/argocd/README.md b/argocd/README.md index a56747b..7cffaaa 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -4,6 +4,7 @@ This folder bootstraps Countly for multiple customers using ArgoCD `ApplicationS ## What This Layout Does +- `operators/` bootstraps required platform operators into the target cluster. - `projects/` creates one ArgoCD `AppProject` per customer. - `applicationsets/` generates one ArgoCD `Application` per component per customer. - `environments//` stores the Helm values used by those Applications. @@ -20,6 +21,7 @@ For the initial rollout, ArgoCD is scoped to: 3. Update the cluster API servers in: - `argocd/projects/customers.yaml` - `argocd/applicationsets/*.yaml` + - `argocd/operators/*.yaml` - The `server:` value must match the cluster entry registered in ArgoCD. 4. 
Replace the environment hostname in: - `environments/helm-argocd/global.yaml` @@ -41,6 +43,7 @@ kubectl apply -f argocd/root-application.yaml -n argocd ## Generated Application Order +- Wave `-30` to `-26`: cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator - Wave `0`: MongoDB, ClickHouse - Wave `5`: Kafka - Wave `10`: Countly diff --git a/argocd/operators/00-cert-manager.yaml b/argocd/operators/00-cert-manager.yaml new file mode 100644 index 0000000..4694ca4 --- /dev/null +++ b/argocd/operators/00-cert-manager.yaml @@ -0,0 +1,29 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: cert-manager + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "-30" +spec: + project: default + source: + repoURL: https://charts.jetstack.io + chart: cert-manager + targetRevision: v1.17.2 + helm: + releaseName: cert-manager + parameters: + - name: installCRDs + value: "true" + destination: + server: https://34.135.156.216 + namespace: cert-manager + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + diff --git a/argocd/operators/01-mongodb-crds.yaml b/argocd/operators/01-mongodb-crds.yaml new file mode 100644 index 0000000..8c597a3 --- /dev/null +++ b/argocd/operators/01-mongodb-crds.yaml @@ -0,0 +1,26 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: mongodb-crds + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "-29" +spec: + project: default + source: + repoURL: https://github.com/mongodb/mongodb-kubernetes.git + targetRevision: "1.7.0" + path: public + directory: + include: crds.yaml + destination: + server: https://34.135.156.216 + namespace: mongodb + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + diff --git a/argocd/operators/02-mongodb-operator.yaml b/argocd/operators/02-mongodb-operator.yaml new file mode 100644 index 
0000000..37cb5b8 --- /dev/null +++ b/argocd/operators/02-mongodb-operator.yaml @@ -0,0 +1,30 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: mongodb-kubernetes-operator + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "-28" +spec: + project: default + source: + repoURL: https://mongodb.github.io/helm-charts + chart: mongodb-kubernetes + targetRevision: 1.7.0 + helm: + releaseName: mongodb-kubernetes-operator + valuesObject: + operator: + watchedResources: + - mongodbcommunity + destination: + server: https://34.135.156.216 + namespace: mongodb + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + diff --git a/argocd/operators/03-clickhouse-operator.yaml b/argocd/operators/03-clickhouse-operator.yaml new file mode 100644 index 0000000..6700fe4 --- /dev/null +++ b/argocd/operators/03-clickhouse-operator.yaml @@ -0,0 +1,29 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: clickhouse-operator + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "-27" +spec: + project: default + source: + repoURL: ghcr.io/clickhouse + chart: clickhouse-operator-helm + targetRevision: 0.0.2 + helm: + releaseName: clickhouse-operator + valuesObject: + certManager: + install: false + destination: + server: https://34.135.156.216 + namespace: clickhouse-operator-system + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + diff --git a/argocd/operators/04-strimzi-operator.yaml b/argocd/operators/04-strimzi-operator.yaml new file mode 100644 index 0000000..e3d2ac9 --- /dev/null +++ b/argocd/operators/04-strimzi-operator.yaml @@ -0,0 +1,26 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: strimzi-kafka-operator + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "-26" +spec: + project: default + source: + repoURL: 
https://strimzi.io/charts/ + chart: strimzi-kafka-operator + targetRevision: 0.51.0 + helm: + releaseName: strimzi-kafka-operator + destination: + server: https://34.135.156.216 + namespace: kafka + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + From 30afdd04b842b1f6dd6a18752b07088fb5003976 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 17:51:30 +0530 Subject: [PATCH 06/79] Delay Countly HPAs until after deployments --- charts/countly/templates/_countly-component.tpl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/charts/countly/templates/_countly-component.tpl b/charts/countly/templates/_countly-component.tpl index f922930..7ec4119 100644 --- a/charts/countly/templates/_countly-component.tpl +++ b/charts/countly/templates/_countly-component.tpl @@ -218,6 +218,10 @@ metadata: labels: {{- include "countly.labels" $root | nindent 4 }} app.kubernetes.io/component: {{ $component }} + {{- if $root.Values.argocd.enabled }} + annotations: + {{- include "countly.syncWave" (dict "wave" "6" "root" $root) | nindent 4 }} + {{- end }} spec: scaleTargetRef: apiVersion: apps/v1 From 05ecbf5571aec3b7b3bd4f89c6a1a00ed549e50e Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 23:43:02 +0530 Subject: [PATCH 07/79] Add Argo CD nginx ingress bootstrap app --- argocd/README.md | 2 +- argocd/operators/05-nginx-ingress.yaml | 30 ++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 argocd/operators/05-nginx-ingress.yaml diff --git a/argocd/README.md b/argocd/README.md index 7cffaaa..dc77f66 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -43,7 +43,7 @@ kubectl apply -f argocd/root-application.yaml -n argocd ## Generated Application Order -- Wave `-30` to `-26`: cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator +- Wave `-30` to `-25`: cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator, 
NGINX ingress - Wave `0`: MongoDB, ClickHouse - Wave `5`: Kafka - Wave `10`: Countly diff --git a/argocd/operators/05-nginx-ingress.yaml b/argocd/operators/05-nginx-ingress.yaml new file mode 100644 index 0000000..362aebd --- /dev/null +++ b/argocd/operators/05-nginx-ingress.yaml @@ -0,0 +1,30 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: nginx-ingress + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "-25" +spec: + project: default + sources: + - repoURL: https://helm.nginx.com/stable + chart: nginx-ingress + targetRevision: 2.1.0 + helm: + releaseName: nginx-ingress + valueFiles: + - $values/nginx-ingress-values.yaml + - repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + ref: values + destination: + server: https://34.135.156.216 + namespace: ingress-nginx + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true From 4152c21c6062ab78596634f2be624ea40fd6eeef Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sat, 28 Mar 2026 23:52:51 +0530 Subject: [PATCH 08/79] Increase nginx ingress controller memory --- nginx-ingress-values.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nginx-ingress-values.yaml b/nginx-ingress-values.yaml index b95f8d3..7d5bcea 100644 --- a/nginx-ingress-values.yaml +++ b/nginx-ingress-values.yaml @@ -36,10 +36,10 @@ controller: resources: requests: cpu: "1" - memory: "1Gi" + memory: "2Gi" limits: - cpu: "1" - memory: "1Gi" + cpu: "2" + memory: "2Gi" # --- Service --- service: From c3bcb89220d4e8442e139219941cd9d10af58c60 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 00:05:36 +0530 Subject: [PATCH 09/79] Add GitOps letsencrypt ClusterIssuer --- argocd/README.md | 2 +- .../operators/06-letsencrypt-prod-issuer.yaml | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 argocd/operators/06-letsencrypt-prod-issuer.yaml diff --git 
a/argocd/README.md b/argocd/README.md index dc77f66..0127e42 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -43,7 +43,7 @@ kubectl apply -f argocd/root-application.yaml -n argocd ## Generated Application Order -- Wave `-30` to `-25`: cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator, NGINX ingress +- Wave `-30` to `-24`: cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator, NGINX ingress, Let’s Encrypt ClusterIssuer - Wave `0`: MongoDB, ClickHouse - Wave `5`: Kafka - Wave `10`: Countly diff --git a/argocd/operators/06-letsencrypt-prod-issuer.yaml b/argocd/operators/06-letsencrypt-prod-issuer.yaml new file mode 100644 index 0000000..2a6ddf7 --- /dev/null +++ b/argocd/operators/06-letsencrypt-prod-issuer.yaml @@ -0,0 +1,18 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod + namespace: cert-manager + annotations: + argocd.argoproj.io/sync-wave: "-24" +spec: + acme: + email: devops@count.ly + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-prod + solvers: + - http01: + ingress: + class: nginx + From 04fd60a2e62c314fc36486a70bb9d42db434f2d5 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 00:08:34 +0530 Subject: [PATCH 10/79] Target letsencrypt ClusterIssuer to deployment cluster --- .../clusterissuer.yaml} | 3 --- .../06-letsencrypt-prod-issuer-app.yaml | 24 +++++++++++++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) rename argocd/{operators/06-letsencrypt-prod-issuer.yaml => operator-manifests/letsencrypt-prod-issuer/clusterissuer.yaml} (79%) create mode 100644 argocd/operators/06-letsencrypt-prod-issuer-app.yaml diff --git a/argocd/operators/06-letsencrypt-prod-issuer.yaml b/argocd/operator-manifests/letsencrypt-prod-issuer/clusterissuer.yaml similarity index 79% rename from argocd/operators/06-letsencrypt-prod-issuer.yaml rename to 
argocd/operator-manifests/letsencrypt-prod-issuer/clusterissuer.yaml index 2a6ddf7..4eca398 100644 --- a/argocd/operators/06-letsencrypt-prod-issuer.yaml +++ b/argocd/operator-manifests/letsencrypt-prod-issuer/clusterissuer.yaml @@ -2,9 +2,6 @@ apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: name: letsencrypt-prod - namespace: cert-manager - annotations: - argocd.argoproj.io/sync-wave: "-24" spec: acme: email: devops@count.ly diff --git a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml new file mode 100644 index 0000000..3373d6e --- /dev/null +++ b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: letsencrypt-prod-issuer + namespace: argocd + annotations: + argocd.argoproj.io/sync-wave: "-24" +spec: + project: default + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: argocd/operator-manifests/letsencrypt-prod-issuer + directory: + recurse: true + destination: + server: https://34.135.156.216 + namespace: cert-manager + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - ServerSideApply=true From 086820fc3d6f0ff5512aab88f0d11fa1893d3e73 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 00:12:19 +0530 Subject: [PATCH 11/79] Exclude operator manifests from root bootstrap --- argocd/root-application.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/argocd/root-application.yaml b/argocd/root-application.yaml index e0f4f23..b123f37 100644 --- a/argocd/root-application.yaml +++ b/argocd/root-application.yaml @@ -11,6 +11,7 @@ spec: path: argocd directory: recurse: true + exclude: operator-manifests/** destination: server: https://kubernetes.default.svc namespace: argocd From 9fc4fe4d1990168ab78cfe3f49b77ba9293e9d4f Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 00:30:27 +0530 Subject: [PATCH 12/79] Fix 
LetsEncrypt HTTP-01 ingress annotations --- charts/countly/templates/ingress.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/charts/countly/templates/ingress.yaml b/charts/countly/templates/ingress.yaml index 413d051..3c610b6 100644 --- a/charts/countly/templates/ingress.yaml +++ b/charts/countly/templates/ingress.yaml @@ -14,6 +14,8 @@ metadata: {{- end }} {{- if eq $tlsMode "letsencrypt" }} cert-manager.io/cluster-issuer: {{ .Values.ingress.tls.clusterIssuer | default "letsencrypt-prod" | quote }} + acme.cert-manager.io/http01-ingress-class: {{ .Values.ingress.className | default "nginx" | quote }} + acme.cert-manager.io/http01-edit-in-place: "true" {{- end }} {{- if eq $tlsMode "selfSigned" }} cert-manager.io/cluster-issuer: {{ (.Values.ingress.tls.selfSigned).issuerName | default (printf "%s-ca-issuer" (include "countly.fullname" .)) | quote }} From 52dd0431ca2810934120548582b84c43101ba115 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 00:38:30 +0530 Subject: [PATCH 13/79] Use temporary cert for LetsEncrypt HTTP-01 --- charts/countly/templates/ingress.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charts/countly/templates/ingress.yaml b/charts/countly/templates/ingress.yaml index 3c610b6..52b40da 100644 --- a/charts/countly/templates/ingress.yaml +++ b/charts/countly/templates/ingress.yaml @@ -16,6 +16,7 @@ metadata: cert-manager.io/cluster-issuer: {{ .Values.ingress.tls.clusterIssuer | default "letsencrypt-prod" | quote }} acme.cert-manager.io/http01-ingress-class: {{ .Values.ingress.className | default "nginx" | quote }} acme.cert-manager.io/http01-edit-in-place: "true" + cert-manager.io/issue-temporary-certificate: "true" {{- end }} {{- if eq $tlsMode "selfSigned" }} cert-manager.io/cluster-issuer: {{ (.Values.ingress.tls.selfSigned).issuerName | default (printf "%s-ca-issuer" (include "countly.fullname" .)) | quote }} From 5719fcc59f8964329cf767c5eae8de627642895c Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 
01:04:27 +0530 Subject: [PATCH 14/79] Fix cert-manager HTTP-01 solver override --- charts/countly/templates/ingress.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/charts/countly/templates/ingress.yaml b/charts/countly/templates/ingress.yaml index 52b40da..e2c5f6d 100644 --- a/charts/countly/templates/ingress.yaml +++ b/charts/countly/templates/ingress.yaml @@ -14,7 +14,6 @@ metadata: {{- end }} {{- if eq $tlsMode "letsencrypt" }} cert-manager.io/cluster-issuer: {{ .Values.ingress.tls.clusterIssuer | default "letsencrypt-prod" | quote }} - acme.cert-manager.io/http01-ingress-class: {{ .Values.ingress.className | default "nginx" | quote }} acme.cert-manager.io/http01-edit-in-place: "true" cert-manager.io/issue-temporary-certificate: "true" {{- end }} From 27b3c297383ca59c6b27cfaa641fd215f6c9cab9 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 01:18:27 +0530 Subject: [PATCH 15/79] Ignore benign nginx ingress drift --- argocd/operators/05-nginx-ingress.yaml | 34 ++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/argocd/operators/05-nginx-ingress.yaml b/argocd/operators/05-nginx-ingress.yaml index 362aebd..b4843cb 100644 --- a/argocd/operators/05-nginx-ingress.yaml +++ b/argocd/operators/05-nginx-ingress.yaml @@ -28,3 +28,37 @@ spec: syncOptions: - CreateNamespace=true - ServerSideApply=true + - RespectIgnoreDifferences=true + ignoreDifferences: + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: apdoslogconfs.appprotectdos.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: apdospolicies.appprotectdos.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: aplogconfs.appprotect.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: appolicies.appprotect.f5.com + jsonPointers: + - 
/spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: apusersigs.appprotect.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: "" + kind: Service + name: nginx-ingress-controller + namespace: ingress-nginx + jsonPointers: + - /metadata/annotations/cloud.google.com~1neg + - /spec/healthCheckNodePort From 7da7eda6b1be6caaaf4717d64f2f6a57d4eaa618 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Sun, 29 Mar 2026 01:52:10 +0530 Subject: [PATCH 16/79] Refactor Argo CD for reusable customer onboarding --- README.md | 17 +++ argocd/README.md | 31 ++-- argocd/applicationsets/00-mongodb.yaml | 31 ++-- argocd/applicationsets/01-clickhouse.yaml | 31 ++-- argocd/applicationsets/02-kafka.yaml | 37 +++-- argocd/applicationsets/03-countly.yaml | 37 +++-- argocd/applicationsets/04-observability.yaml | 34 ++--- argocd/customers/helm-argocd.yaml | 9 ++ argocd/operators/00-cert-manager.yaml | 62 ++++---- argocd/operators/01-mongodb-crds.yaml | 56 +++++--- argocd/operators/02-mongodb-operator.yaml | 64 +++++---- argocd/operators/03-clickhouse-operator.yaml | 62 ++++---- argocd/operators/04-strimzi-operator.yaml | 56 +++++--- argocd/operators/05-nginx-ingress.yaml | 133 ++++++++++-------- .../06-letsencrypt-prod-issuer-app.yaml | 53 ++++--- argocd/projects/customers.yaml | 16 +-- argocd/root-application.yaml | 2 +- scripts/new-argocd-customer.sh | 104 ++++++++++++++ 18 files changed, 528 insertions(+), 307 deletions(-) create mode 100644 argocd/customers/helm-argocd.yaml create mode 100755 scripts/new-argocd-customer.sh diff --git a/README.md b/README.md index 6a9dc6d..2926655 100644 --- a/README.md +++ b/README.md @@ -172,6 +172,23 @@ Install required operators before deploying Countly. 
See [docs/PREREQUISITES.md] helmfile -e my-deployment apply ``` +### GitOps Customer Onboarding + +For Argo CD managed deployments, scaffold a new customer/cluster with: + +```bash +./scripts/new-argocd-customer.sh +``` + +This creates: +- `environments//` +- `argocd/customers/.yaml` + +Then: +1. fill in `environments//secrets-*.yaml` +2. commit +3. sync `countly-bootstrap` + ### Manual Installation (without Helmfile) ```bash diff --git a/argocd/README.md b/argocd/README.md index 0127e42..1f59684 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -5,23 +5,22 @@ This folder bootstraps Countly for multiple customers using ArgoCD `ApplicationS ## What This Layout Does - `operators/` bootstraps required platform operators into the target cluster. -- `projects/` creates one ArgoCD `AppProject` per customer. +- `projects/` defines the shared ArgoCD `AppProject` used by customer apps. +- `customers/` contains one metadata file per customer/cluster. - `applicationsets/` generates one ArgoCD `Application` per component per customer. - `environments//` stores the Helm values used by those Applications. - `root-application.yaml` creates one parent ArgoCD Application that syncs this whole `argocd/` folder. -For the initial rollout, ArgoCD is scoped to: +For the initial rollout, ArgoCD is scoped to one customer metadata file: -- `helm-argocd` +- `argocd/customers/helm-argocd.yaml` ## Before You Sync 1. Install ArgoCD and the ApplicationSet controller. 2. Register each target cluster in ArgoCD. -3. Update the cluster API servers in: - - `argocd/projects/customers.yaml` - - `argocd/applicationsets/*.yaml` - - `argocd/operators/*.yaml` +3. Create or update one customer metadata file in: + - `argocd/customers/.yaml` - The `server:` value must match the cluster entry registered in ArgoCD. 4. 
Replace the environment hostname in: - `environments/helm-argocd/global.yaml` @@ -43,7 +42,7 @@ kubectl apply -f argocd/root-application.yaml -n argocd ## Generated Application Order -- Wave `-30` to `-24`: cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator, NGINX ingress, Let’s Encrypt ClusterIssuer +- Wave `-30` to `-24`: per-customer cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator, NGINX ingress, Let’s Encrypt ClusterIssuer - Wave `0`: MongoDB, ClickHouse - Wave `5`: Kafka - Wave `10`: Countly @@ -51,8 +50,14 @@ kubectl apply -f argocd/root-application.yaml -n argocd ## Add A New Customer Later -1. Copy `environments/reference` to `environments/`. -2. Add the customer entry to each `ApplicationSet` list. -3. Add a matching `AppProject` to `argocd/projects/customers.yaml`. -4. Add that customer's Google Secret Manager keys. -5. Commit and let ArgoCD reconcile. +1. Run: + ```bash + ./scripts/new-argocd-customer.sh + ``` +2. Fill in `environments//secrets-*.yaml`. +3. Adjust any customer-specific overrides in `environments//*.yaml`. +4. Commit and let ArgoCD reconcile. 
+ +Only two Git-managed inputs are required per new customer: +- `environments//` +- `argocd/customers/.yaml` diff --git a/argocd/applicationsets/00-mongodb.yaml b/argocd/applicationsets/00-mongodb.yaml index 657246b..c66bd8f 100644 --- a/argocd/applicationsets/00-mongodb.yaml +++ b/argocd/applicationsets/00-mongodb.yaml @@ -4,21 +4,22 @@ metadata: name: countly-mongodb namespace: argocd spec: + goTemplate: true + goTemplateOptions: + - missingkey=error generators: - - list: - elements: - - customer: helm-argocd - project: helm-argocd - server: https://34.135.156.216 - sizing: production - security: open + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml template: metadata: - name: "{{customer}}-mongodb" + name: "{{ .customer }}-mongodb" annotations: argocd.argoproj.io/sync-wave: "0" spec: - project: "{{project}}" + project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git targetRevision: deploy-test-ui @@ -26,16 +27,16 @@ spec: helm: releaseName: countly-mongodb valueFiles: - - "../../environments/{{customer}}/global.yaml" - - "../../profiles/sizing/{{sizing}}/mongodb.yaml" - - "../../profiles/security/{{security}}/mongodb.yaml" - - "../../environments/{{customer}}/mongodb.yaml" - - "../../environments/{{customer}}/secrets-mongodb.yaml" + - "../../environments/{{ .environment }}/global.yaml" + - "../../profiles/sizing/{{ .sizing }}/mongodb.yaml" + - "../../profiles/security/{{ .security }}/mongodb.yaml" + - "../../environments/{{ .environment }}/mongodb.yaml" + - "../../environments/{{ .environment }}/secrets-mongodb.yaml" parameters: - name: argocd.enabled value: "true" destination: - server: "{{server}}" + server: "{{ .server }}" namespace: mongodb syncPolicy: automated: diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml index 637416a..a979d64 100644 --- a/argocd/applicationsets/01-clickhouse.yaml +++ 
b/argocd/applicationsets/01-clickhouse.yaml @@ -4,21 +4,22 @@ metadata: name: countly-clickhouse namespace: argocd spec: + goTemplate: true + goTemplateOptions: + - missingkey=error generators: - - list: - elements: - - customer: helm-argocd - project: helm-argocd - server: https://34.135.156.216 - sizing: production - security: open + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml template: metadata: - name: "{{customer}}-clickhouse" + name: "{{ .customer }}-clickhouse" annotations: argocd.argoproj.io/sync-wave: "0" spec: - project: "{{project}}" + project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git targetRevision: deploy-test-ui @@ -26,16 +27,16 @@ spec: helm: releaseName: countly-clickhouse valueFiles: - - "../../environments/{{customer}}/global.yaml" - - "../../profiles/sizing/{{sizing}}/clickhouse.yaml" - - "../../profiles/security/{{security}}/clickhouse.yaml" - - "../../environments/{{customer}}/clickhouse.yaml" - - "../../environments/{{customer}}/secrets-clickhouse.yaml" + - "../../environments/{{ .environment }}/global.yaml" + - "../../profiles/sizing/{{ .sizing }}/clickhouse.yaml" + - "../../profiles/security/{{ .security }}/clickhouse.yaml" + - "../../environments/{{ .environment }}/clickhouse.yaml" + - "../../environments/{{ .environment }}/secrets-clickhouse.yaml" parameters: - name: argocd.enabled value: "true" destination: - server: "{{server}}" + server: "{{ .server }}" namespace: clickhouse syncPolicy: automated: diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index 13a7baa..fa6e542 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -4,23 +4,22 @@ metadata: name: countly-kafka namespace: argocd spec: + goTemplate: true + goTemplateOptions: + - missingkey=error generators: - - list: - elements: - - customer: helm-argocd - project: helm-argocd - server: 
https://34.135.156.216 - sizing: production - security: open - observability: full - kafkaConnect: balanced + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml template: metadata: - name: "{{customer}}-kafka" + name: "{{ .customer }}-kafka" annotations: argocd.argoproj.io/sync-wave: "5" spec: - project: "{{project}}" + project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git targetRevision: deploy-test-ui @@ -28,18 +27,18 @@ spec: helm: releaseName: countly-kafka valueFiles: - - "../../environments/{{customer}}/global.yaml" - - "../../profiles/sizing/{{sizing}}/kafka.yaml" - - "../../profiles/kafka-connect/{{kafkaConnect}}/kafka.yaml" - - "../../profiles/observability/{{observability}}/kafka.yaml" - - "../../profiles/security/{{security}}/kafka.yaml" - - "../../environments/{{customer}}/kafka.yaml" - - "../../environments/{{customer}}/secrets-kafka.yaml" + - "../../environments/{{ .environment }}/global.yaml" + - "../../profiles/sizing/{{ .sizing }}/kafka.yaml" + - "../../profiles/kafka-connect/{{ .kafkaConnect }}/kafka.yaml" + - "../../profiles/observability/{{ .observability }}/kafka.yaml" + - "../../profiles/security/{{ .security }}/kafka.yaml" + - "../../environments/{{ .environment }}/kafka.yaml" + - "../../environments/{{ .environment }}/secrets-kafka.yaml" parameters: - name: argocd.enabled value: "true" destination: - server: "{{server}}" + server: "{{ .server }}" namespace: kafka syncPolicy: automated: diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index 63c5a71..6eabe2e 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -4,23 +4,22 @@ metadata: name: countly-app namespace: argocd spec: + goTemplate: true + goTemplateOptions: + - missingkey=error generators: - - list: - elements: - - customer: helm-argocd - project: helm-argocd - server: https://34.135.156.216 - 
sizing: production - security: open - tls: letsencrypt - observability: full + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml template: metadata: - name: "{{customer}}-countly" + name: "{{ .customer }}-countly" annotations: argocd.argoproj.io/sync-wave: "10" spec: - project: "{{project}}" + project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git targetRevision: deploy-test-ui @@ -28,18 +27,18 @@ spec: helm: releaseName: countly valueFiles: - - "../../environments/{{customer}}/global.yaml" - - "../../profiles/sizing/{{sizing}}/countly.yaml" - - "../../profiles/tls/{{tls}}/countly.yaml" - - "../../profiles/observability/{{observability}}/countly.yaml" - - "../../profiles/security/{{security}}/countly.yaml" - - "../../environments/{{customer}}/countly.yaml" - - "../../environments/{{customer}}/secrets-countly.yaml" + - "../../environments/{{ .environment }}/global.yaml" + - "../../profiles/sizing/{{ .sizing }}/countly.yaml" + - "../../profiles/tls/{{ .tls }}/countly.yaml" + - "../../profiles/observability/{{ .observability }}/countly.yaml" + - "../../profiles/security/{{ .security }}/countly.yaml" + - "../../environments/{{ .environment }}/countly.yaml" + - "../../environments/{{ .environment }}/secrets-countly.yaml" parameters: - name: argocd.enabled value: "true" destination: - server: "{{server}}" + server: "{{ .server }}" namespace: countly syncPolicy: automated: diff --git a/argocd/applicationsets/04-observability.yaml b/argocd/applicationsets/04-observability.yaml index 3552800..7cd7712 100644 --- a/argocd/applicationsets/04-observability.yaml +++ b/argocd/applicationsets/04-observability.yaml @@ -4,22 +4,22 @@ metadata: name: countly-observability namespace: argocd spec: + goTemplate: true + goTemplateOptions: + - missingkey=error generators: - - list: - elements: - - customer: helm-argocd - project: helm-argocd - server: https://34.135.156.216 - sizing: 
production - security: open - observability: full + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml template: metadata: - name: "{{customer}}-observability" + name: "{{ .customer }}-observability" annotations: argocd.argoproj.io/sync-wave: "15" spec: - project: "{{project}}" + project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git targetRevision: deploy-test-ui @@ -27,17 +27,17 @@ spec: helm: releaseName: countly-observability valueFiles: - - "../../environments/{{customer}}/global.yaml" - - "../../profiles/sizing/{{sizing}}/observability.yaml" - - "../../profiles/observability/{{observability}}/observability.yaml" - - "../../profiles/security/{{security}}/observability.yaml" - - "../../environments/{{customer}}/observability.yaml" - - "../../environments/{{customer}}/secrets-observability.yaml" + - "../../environments/{{ .environment }}/global.yaml" + - "../../profiles/sizing/{{ .sizing }}/observability.yaml" + - "../../profiles/observability/{{ .observability }}/observability.yaml" + - "../../profiles/security/{{ .security }}/observability.yaml" + - "../../environments/{{ .environment }}/observability.yaml" + - "../../environments/{{ .environment }}/secrets-observability.yaml" parameters: - name: argocd.enabled value: "true" destination: - server: "{{server}}" + server: "{{ .server }}" namespace: observability syncPolicy: automated: diff --git a/argocd/customers/helm-argocd.yaml b/argocd/customers/helm-argocd.yaml new file mode 100644 index 0000000..91a89c1 --- /dev/null +++ b/argocd/customers/helm-argocd.yaml @@ -0,0 +1,9 @@ +customer: helm-argocd +environment: helm-argocd +project: countly-customers +server: https://34.135.156.216 +sizing: production +security: open +tls: letsencrypt +observability: full +kafkaConnect: balanced diff --git a/argocd/operators/00-cert-manager.yaml b/argocd/operators/00-cert-manager.yaml index 4694ca4..93b6502 100644 --- 
a/argocd/operators/00-cert-manager.yaml +++ b/argocd/operators/00-cert-manager.yaml @@ -1,29 +1,41 @@ apiVersion: argoproj.io/v1alpha1 -kind: Application +kind: ApplicationSet metadata: - name: cert-manager + name: customer-cert-manager namespace: argocd - annotations: - argocd.argoproj.io/sync-wave: "-30" spec: - project: default - source: - repoURL: https://charts.jetstack.io - chart: cert-manager - targetRevision: v1.17.2 - helm: - releaseName: cert-manager - parameters: - - name: installCRDs - value: "true" - destination: - server: https://34.135.156.216 - namespace: cert-manager - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true - - ServerSideApply=true - + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-cert-manager" + annotations: + argocd.argoproj.io/sync-wave: "-30" + spec: + project: default + source: + repoURL: https://charts.jetstack.io + chart: cert-manager + targetRevision: v1.17.2 + helm: + releaseName: cert-manager + parameters: + - name: installCRDs + value: "true" + destination: + server: "{{ .server }}" + namespace: cert-manager + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true diff --git a/argocd/operators/01-mongodb-crds.yaml b/argocd/operators/01-mongodb-crds.yaml index 8c597a3..01aff45 100644 --- a/argocd/operators/01-mongodb-crds.yaml +++ b/argocd/operators/01-mongodb-crds.yaml @@ -1,26 +1,38 @@ apiVersion: argoproj.io/v1alpha1 -kind: Application +kind: ApplicationSet metadata: - name: mongodb-crds + name: customer-mongodb-crds namespace: argocd - annotations: - argocd.argoproj.io/sync-wave: "-29" spec: - project: default - source: - repoURL: https://github.com/mongodb/mongodb-kubernetes.git - targetRevision: "1.7.0" - path: 
public - directory: - include: crds.yaml - destination: - server: https://34.135.156.216 - namespace: mongodb - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true - - ServerSideApply=true - + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-mongodb-crds" + annotations: + argocd.argoproj.io/sync-wave: "-29" + spec: + project: default + source: + repoURL: https://github.com/mongodb/mongodb-kubernetes.git + targetRevision: "1.7.0" + path: public + directory: + include: crds.yaml + destination: + server: "{{ .server }}" + namespace: mongodb + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true diff --git a/argocd/operators/02-mongodb-operator.yaml b/argocd/operators/02-mongodb-operator.yaml index 37cb5b8..730ee95 100644 --- a/argocd/operators/02-mongodb-operator.yaml +++ b/argocd/operators/02-mongodb-operator.yaml @@ -1,30 +1,42 @@ apiVersion: argoproj.io/v1alpha1 -kind: Application +kind: ApplicationSet metadata: - name: mongodb-kubernetes-operator + name: customer-mongodb-kubernetes-operator namespace: argocd - annotations: - argocd.argoproj.io/sync-wave: "-28" spec: - project: default - source: - repoURL: https://mongodb.github.io/helm-charts - chart: mongodb-kubernetes - targetRevision: 1.7.0 - helm: - releaseName: mongodb-kubernetes-operator - valuesObject: - operator: - watchedResources: - - mongodbcommunity - destination: - server: https://34.135.156.216 - namespace: mongodb - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true - - ServerSideApply=true - + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + 
files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-mongodb-kubernetes-operator" + annotations: + argocd.argoproj.io/sync-wave: "-28" + spec: + project: default + source: + repoURL: https://mongodb.github.io/helm-charts + chart: mongodb-kubernetes + targetRevision: 1.7.0 + helm: + releaseName: mongodb-kubernetes-operator + valuesObject: + operator: + watchedResources: + - mongodbcommunity + destination: + server: "{{ .server }}" + namespace: mongodb + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true diff --git a/argocd/operators/03-clickhouse-operator.yaml b/argocd/operators/03-clickhouse-operator.yaml index 6700fe4..099b9cc 100644 --- a/argocd/operators/03-clickhouse-operator.yaml +++ b/argocd/operators/03-clickhouse-operator.yaml @@ -1,29 +1,41 @@ apiVersion: argoproj.io/v1alpha1 -kind: Application +kind: ApplicationSet metadata: - name: clickhouse-operator + name: customer-clickhouse-operator namespace: argocd - annotations: - argocd.argoproj.io/sync-wave: "-27" spec: - project: default - source: - repoURL: ghcr.io/clickhouse - chart: clickhouse-operator-helm - targetRevision: 0.0.2 - helm: - releaseName: clickhouse-operator - valuesObject: - certManager: - install: false - destination: - server: https://34.135.156.216 - namespace: clickhouse-operator-system - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true - - ServerSideApply=true - + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-clickhouse-operator" + annotations: + argocd.argoproj.io/sync-wave: "-27" + spec: + project: default + source: + repoURL: ghcr.io/clickhouse + chart: clickhouse-operator-helm + targetRevision: 0.0.2 + helm: + releaseName: 
clickhouse-operator + valuesObject: + certManager: + install: false + destination: + server: "{{ .server }}" + namespace: clickhouse-operator-system + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true diff --git a/argocd/operators/04-strimzi-operator.yaml b/argocd/operators/04-strimzi-operator.yaml index e3d2ac9..58b3805 100644 --- a/argocd/operators/04-strimzi-operator.yaml +++ b/argocd/operators/04-strimzi-operator.yaml @@ -1,26 +1,38 @@ apiVersion: argoproj.io/v1alpha1 -kind: Application +kind: ApplicationSet metadata: - name: strimzi-kafka-operator + name: customer-strimzi-kafka-operator namespace: argocd - annotations: - argocd.argoproj.io/sync-wave: "-26" spec: - project: default - source: - repoURL: https://strimzi.io/charts/ - chart: strimzi-kafka-operator - targetRevision: 0.51.0 - helm: - releaseName: strimzi-kafka-operator - destination: - server: https://34.135.156.216 - namespace: kafka - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true - - ServerSideApply=true - + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-strimzi-kafka-operator" + annotations: + argocd.argoproj.io/sync-wave: "-26" + spec: + project: default + source: + repoURL: https://strimzi.io/charts/ + chart: strimzi-kafka-operator + targetRevision: 0.51.0 + helm: + releaseName: strimzi-kafka-operator + destination: + server: "{{ .server }}" + namespace: kafka + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true diff --git a/argocd/operators/05-nginx-ingress.yaml b/argocd/operators/05-nginx-ingress.yaml index b4843cb..3510b6e 100644 --- a/argocd/operators/05-nginx-ingress.yaml +++ 
b/argocd/operators/05-nginx-ingress.yaml @@ -1,64 +1,77 @@ apiVersion: argoproj.io/v1alpha1 -kind: Application +kind: ApplicationSet metadata: - name: nginx-ingress + name: customer-nginx-ingress namespace: argocd - annotations: - argocd.argoproj.io/sync-wave: "-25" spec: - project: default - sources: - - repoURL: https://helm.nginx.com/stable - chart: nginx-ingress - targetRevision: 2.1.0 - helm: - releaseName: nginx-ingress - valueFiles: - - $values/nginx-ingress-values.yaml - - repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui - ref: values - destination: - server: https://34.135.156.216 - namespace: ingress-nginx - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - CreateNamespace=true - - ServerSideApply=true - - RespectIgnoreDifferences=true - ignoreDifferences: - - group: apiextensions.k8s.io - kind: CustomResourceDefinition - name: apdoslogconfs.appprotectdos.f5.com - jsonPointers: - - /spec/preserveUnknownFields - - group: apiextensions.k8s.io - kind: CustomResourceDefinition - name: apdospolicies.appprotectdos.f5.com - jsonPointers: - - /spec/preserveUnknownFields - - group: apiextensions.k8s.io - kind: CustomResourceDefinition - name: aplogconfs.appprotect.f5.com - jsonPointers: - - /spec/preserveUnknownFields - - group: apiextensions.k8s.io - kind: CustomResourceDefinition - name: appolicies.appprotect.f5.com - jsonPointers: - - /spec/preserveUnknownFields - - group: apiextensions.k8s.io - kind: CustomResourceDefinition - name: apusersigs.appprotect.f5.com - jsonPointers: - - /spec/preserveUnknownFields - - group: "" - kind: Service - name: nginx-ingress-controller - namespace: ingress-nginx - jsonPointers: - - /metadata/annotations/cloud.google.com~1neg - - /spec/healthCheckNodePort + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml + template: + 
metadata: + name: "{{ .customer }}-nginx-ingress" + annotations: + argocd.argoproj.io/sync-wave: "-25" + spec: + project: default + sources: + - repoURL: https://helm.nginx.com/stable + chart: nginx-ingress + targetRevision: 2.1.0 + helm: + releaseName: nginx-ingress + valueFiles: + - $values/nginx-ingress-values.yaml + - repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + ref: values + destination: + server: "{{ .server }}" + namespace: ingress-nginx + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true + - RespectIgnoreDifferences=true + ignoreDifferences: + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: apdoslogconfs.appprotectdos.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: apdospolicies.appprotectdos.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: aplogconfs.appprotect.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: appolicies.appprotect.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: apiextensions.k8s.io + kind: CustomResourceDefinition + name: apusersigs.appprotect.f5.com + jsonPointers: + - /spec/preserveUnknownFields + - group: "" + kind: Service + name: nginx-ingress-controller + namespace: ingress-nginx + jsonPointers: + - /metadata/annotations/cloud.google.com~1neg + - /spec/healthCheckNodePort diff --git a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml index 3373d6e..9a10404 100644 --- a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml +++ b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml @@ -1,24 +1,37 @@ apiVersion: argoproj.io/v1alpha1 -kind: Application +kind: ApplicationSet metadata: - name: 
letsencrypt-prod-issuer + name: customer-letsencrypt-prod-issuer namespace: argocd - annotations: - argocd.argoproj.io/sync-wave: "-24" spec: - project: default - source: - repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui - path: argocd/operator-manifests/letsencrypt-prod-issuer - directory: - recurse: true - destination: - server: https://34.135.156.216 - namespace: cert-manager - syncPolicy: - automated: - prune: true - selfHeal: true - syncOptions: - - ServerSideApply=true + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-letsencrypt-prod-issuer" + annotations: + argocd.argoproj.io/sync-wave: "-24" + spec: + project: default + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: argocd/operator-manifests/letsencrypt-prod-issuer + directory: + recurse: true + destination: + server: "{{ .server }}" + namespace: cert-manager + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - ServerSideApply=true diff --git a/argocd/projects/customers.yaml b/argocd/projects/customers.yaml index 5f1ef07..d48d4f7 100644 --- a/argocd/projects/customers.yaml +++ b/argocd/projects/customers.yaml @@ -1,24 +1,24 @@ apiVersion: argoproj.io/v1alpha1 kind: AppProject metadata: - name: helm-argocd + name: countly-customers namespace: argocd spec: - description: "Countly deployment for helm-argocd test environment" + description: "Shared AppProject for GitOps-managed Countly customer environments" sourceRepos: - '*' destinations: - - server: https://34.135.156.216 + - server: '*' namespace: mongodb - - server: https://34.135.156.216 + - server: '*' namespace: clickhouse - - server: https://34.135.156.216 + - server: '*' namespace: kafka - - server: https://34.135.156.216 + - server: '*' namespace: 
countly - - server: https://34.135.156.216 + - server: '*' namespace: observability - - server: https://34.135.156.216 + - server: '*' namespace: countly-migration clusterResourceWhitelist: - group: "" diff --git a/argocd/root-application.yaml b/argocd/root-application.yaml index b123f37..2dad886 100644 --- a/argocd/root-application.yaml +++ b/argocd/root-application.yaml @@ -11,7 +11,7 @@ spec: path: argocd directory: recurse: true - exclude: operator-manifests/** + exclude: "operator-manifests/**,customers/**" destination: server: https://kubernetes.default.svc namespace: argocd diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh new file mode 100755 index 0000000..da60941 --- /dev/null +++ b/scripts/new-argocd-customer.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash + +set -euo pipefail + +usage() { + cat <<'EOF' +Usage: + scripts/new-argocd-customer.sh [project] + +Example: + scripts/new-argocd-customer.sh acme https://1.2.3.4 acme.count.ly + +This command: + 1. copies environments/reference to environments/ + 2. updates environments//global.yaml with the hostname and default profiles + 3. creates argocd/customers/.yaml for the ApplicationSets + +Defaults: + project countly-customers + sizing production + security open + tls letsencrypt + observability full + kafkaConnect balanced +EOF +} + +if [[ $# -lt 3 || $# -gt 4 ]]; then + usage + exit 1 +fi + +customer="$1" +server="$2" +hostname="$3" +project="${4:-countly-customers}" + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +env_dir="${repo_root}/environments/${customer}" +customer_file="${repo_root}/argocd/customers/${customer}.yaml" + +if [[ -e "${env_dir}" ]]; then + echo "Environment already exists: ${env_dir}" >&2 + exit 1 +fi + +if [[ -e "${customer_file}" ]]; then + echo "Customer metadata already exists: ${customer_file}" >&2 + exit 1 +fi + +cp -R "${repo_root}/environments/reference" "${env_dir}" + +cat > "${env_dir}/global.yaml" < "${customer_file}" < Date: Mon, 30 Mar 2026 17:52:10 +0530 Subject: [PATCH 17/79] Make customer selectors authoritative in Argo CD --- argocd/applicationsets/00-mongodb.yaml | 4 ++++ argocd/applicationsets/01-clickhouse.yaml | 4 ++++ argocd/applicationsets/02-kafka.yaml | 8 ++++++++ argocd/applicationsets/03-countly.yaml | 10 ++++++++++ argocd/applicationsets/04-observability.yaml | 8 +++++++- charts/noop/Chart.yaml | 6 ++++++ 6 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 charts/noop/Chart.yaml diff --git a/argocd/applicationsets/00-mongodb.yaml b/argocd/applicationsets/00-mongodb.yaml index c66bd8f..a842718 100644 --- a/argocd/applicationsets/00-mongodb.yaml +++ b/argocd/applicationsets/00-mongodb.yaml @@ -35,6 +35,10 @@ spec: parameters: - name: argocd.enabled value: "true" + - name: global.sizing + value: "{{ .sizing }}" + - name: global.security + value: "{{ .security }}" destination: server: "{{ .server }}" namespace: mongodb diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml index a979d64..4b3ae40 100644 --- a/argocd/applicationsets/01-clickhouse.yaml +++ b/argocd/applicationsets/01-clickhouse.yaml @@ -35,6 +35,10 @@ spec: parameters: - name: argocd.enabled value: "true" + - name: global.sizing + value: "{{ .sizing }}" + - name: global.security + value: "{{ .security }}" destination: server: "{{ .server }}" namespace: clickhouse diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index fa6e542..dbf3691 100644 --- 
a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -37,6 +37,14 @@ spec: parameters: - name: argocd.enabled value: "true" + - name: global.sizing + value: "{{ .sizing }}" + - name: global.security + value: "{{ .security }}" + - name: global.observability + value: "{{ .observability }}" + - name: global.kafkaConnect + value: "{{ .kafkaConnect }}" destination: server: "{{ .server }}" namespace: kafka diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index 6eabe2e..16ef359 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -37,6 +37,16 @@ spec: parameters: - name: argocd.enabled value: "true" + - name: global.sizing + value: "{{ .sizing }}" + - name: global.security + value: "{{ .security }}" + - name: global.observability + value: "{{ .observability }}" + - name: global.tls + value: "{{ .tls }}" + - name: global.kafkaConnect + value: "{{ .kafkaConnect }}" destination: server: "{{ .server }}" namespace: countly diff --git a/argocd/applicationsets/04-observability.yaml b/argocd/applicationsets/04-observability.yaml index 7cd7712..4a9eca7 100644 --- a/argocd/applicationsets/04-observability.yaml +++ b/argocd/applicationsets/04-observability.yaml @@ -23,7 +23,7 @@ spec: source: repoURL: https://github.com/Countly/helm.git targetRevision: deploy-test-ui - path: charts/countly-observability + path: '{{ if eq .observability "disabled" }}charts/noop{{ else }}charts/countly-observability{{ end }}' helm: releaseName: countly-observability valueFiles: @@ -36,6 +36,12 @@ spec: parameters: - name: argocd.enabled value: "true" + - name: global.sizing + value: "{{ .sizing }}" + - name: global.security + value: "{{ .security }}" + - name: global.observability + value: "{{ .observability }}" destination: server: "{{ .server }}" namespace: observability diff --git a/charts/noop/Chart.yaml b/charts/noop/Chart.yaml new file mode 100644 index 0000000..0969edd --- 
/dev/null +++ b/charts/noop/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: noop +description: No-op chart used when a GitOps-managed component is intentionally disabled. +type: application +version: 0.1.0 +appVersion: "0.1.0" From 1a385c14f2e55b43a487b5821655cb23687bed8b Mon Sep 17 00:00:00 2001 From: ihaardik Date: Mon, 30 Mar 2026 22:18:42 +0530 Subject: [PATCH 18/79] Add helm-argo-test direct-secret customer --- .gitignore | 1 + argocd/customers/helm-argo-test.yaml | 10 +++++++ argocd/customers/helm-argocd.yaml | 9 ------- environments/helm-argo-test/clickhouse.yaml | 5 ++++ environments/helm-argo-test/countly.yaml | 7 +++++ environments/helm-argo-test/global.yaml | 26 +++++++++++++++++++ environments/helm-argo-test/kafka.yaml | 7 +++++ environments/helm-argo-test/mongodb.yaml | 5 ++++ .../helm-argo-test/observability.yaml | 2 ++ .../helm-argo-test/secrets-clickhouse.yaml | 6 +++++ .../helm-argo-test/secrets-countly.yaml | 16 ++++++++++++ .../helm-argo-test/secrets-kafka.yaml | 7 +++++ .../helm-argo-test/secrets-mongodb.yaml | 13 ++++++++++ .../helm-argo-test/secrets-observability.yaml | 1 + 14 files changed, 106 insertions(+), 9 deletions(-) create mode 100644 argocd/customers/helm-argo-test.yaml delete mode 100644 argocd/customers/helm-argocd.yaml create mode 100644 environments/helm-argo-test/clickhouse.yaml create mode 100644 environments/helm-argo-test/countly.yaml create mode 100644 environments/helm-argo-test/global.yaml create mode 100644 environments/helm-argo-test/kafka.yaml create mode 100644 environments/helm-argo-test/mongodb.yaml create mode 100644 environments/helm-argo-test/observability.yaml create mode 100644 environments/helm-argo-test/secrets-clickhouse.yaml create mode 100644 environments/helm-argo-test/secrets-countly.yaml create mode 100644 environments/helm-argo-test/secrets-kafka.yaml create mode 100644 environments/helm-argo-test/secrets-mongodb.yaml create mode 100644 environments/helm-argo-test/secrets-observability.yaml diff --git 
a/.gitignore b/.gitignore index 8809778..0808a78 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ secrets-*.yaml # Exception: reference environment templates (contain no real secrets) !environments/reference/secrets-*.yaml +!environments/helm-argo-test/secrets-*.yaml # Helmfile state helmfile.lock diff --git a/argocd/customers/helm-argo-test.yaml b/argocd/customers/helm-argo-test.yaml new file mode 100644 index 0000000..bf53947 --- /dev/null +++ b/argocd/customers/helm-argo-test.yaml @@ -0,0 +1,10 @@ +customer: helm-argo-test +environment: helm-argo-test +project: countly-customers +server: https://34.60.110.103 +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +secretBackend: direct diff --git a/argocd/customers/helm-argocd.yaml b/argocd/customers/helm-argocd.yaml deleted file mode 100644 index 91a89c1..0000000 --- a/argocd/customers/helm-argocd.yaml +++ /dev/null @@ -1,9 +0,0 @@ -customer: helm-argocd -environment: helm-argocd -project: countly-customers -server: https://34.135.156.216 -sizing: production -security: open -tls: letsencrypt -observability: full -kafkaConnect: balanced diff --git a/environments/helm-argo-test/clickhouse.yaml b/environments/helm-argo-test/clickhouse.yaml new file mode 100644 index 0000000..e06126c --- /dev/null +++ b/environments/helm-argo-test/clickhouse.yaml @@ -0,0 +1,5 @@ +# ClickHouse overrides for helm-argo-test +# +# Runtime sizing comes from: +# - profiles/sizing/tier1/clickhouse.yaml +# - profiles/security/open/clickhouse.yaml diff --git a/environments/helm-argo-test/countly.yaml b/environments/helm-argo-test/countly.yaml new file mode 100644 index 0000000..45f8607 --- /dev/null +++ b/environments/helm-argo-test/countly.yaml @@ -0,0 +1,7 @@ +# Countly overrides for helm-argo-test +# +# Most runtime sizing comes from: +# - profiles/sizing/tier1/countly.yaml +# - profiles/tls/letsencrypt/countly.yaml +# - profiles/observability/disabled/countly.yaml +# - 
profiles/security/open/countly.yaml diff --git a/environments/helm-argo-test/global.yaml b/environments/helm-argo-test/global.yaml new file mode 100644 index 0000000..b0d3ff6 --- /dev/null +++ b/environments/helm-argo-test/global.yaml @@ -0,0 +1,26 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + storageClass: "" + imagePullSecrets: [] + +ingress: + hostname: helm-argo-test.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/helm-argo-test/kafka.yaml b/environments/helm-argo-test/kafka.yaml new file mode 100644 index 0000000..2d89d18 --- /dev/null +++ b/environments/helm-argo-test/kafka.yaml @@ -0,0 +1,7 @@ +# Kafka overrides for helm-argo-test +# +# Runtime sizing comes from: +# - profiles/sizing/tier1/kafka.yaml +# - profiles/kafka-connect/balanced/kafka.yaml +# - profiles/observability/disabled/kafka.yaml +# - profiles/security/open/kafka.yaml diff --git a/environments/helm-argo-test/mongodb.yaml b/environments/helm-argo-test/mongodb.yaml new file mode 100644 index 0000000..4939769 --- /dev/null +++ b/environments/helm-argo-test/mongodb.yaml @@ -0,0 +1,5 @@ +# MongoDB overrides for helm-argo-test +# +# Runtime sizing comes from: +# - profiles/sizing/tier1/mongodb.yaml +# - profiles/security/open/mongodb.yaml diff --git a/environments/helm-argo-test/observability.yaml b/environments/helm-argo-test/observability.yaml new file mode 100644 index 0000000..393fd39 --- /dev/null +++ b/environments/helm-argo-test/observability.yaml @@ -0,0 +1,2 @@ +mode: disabled +clusterName: helm-argo-test diff --git a/environments/helm-argo-test/secrets-clickhouse.yaml 
b/environments/helm-argo-test/secrets-clickhouse.yaml new file mode 100644 index 0000000..02f9fc6 --- /dev/null +++ b/environments/helm-argo-test/secrets-clickhouse.yaml @@ -0,0 +1,6 @@ +# ClickHouse secrets — fill these in before first deploy. +auth: + defaultUserPassword: + password: "helm-argo-test-clickhouse-2026" + externalSecret: + enabled: false diff --git a/environments/helm-argo-test/secrets-countly.yaml b/environments/helm-argo-test/secrets-countly.yaml new file mode 100644 index 0000000..eeafe3f --- /dev/null +++ b/environments/helm-argo-test/secrets-countly.yaml @@ -0,0 +1,16 @@ +# Countly secrets — fill these in before first deploy. +# This file is intentionally using direct Helm values for initial setup. +secrets: + mode: values + common: + encryptionReportsKey: "helm-argo-test-reports-key-2026" + webSessionSecret: "helm-argo-test-web-session-2026" + passwordSecret: "helm-argo-test-password-secret-2026" + clickhouse: + username: "default" + password: "helm-argo-test-clickhouse-2026" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + mongodb: + password: "helm-argo-test-mongo-2026" diff --git a/environments/helm-argo-test/secrets-kafka.yaml b/environments/helm-argo-test/secrets-kafka.yaml new file mode 100644 index 0000000..fb5a94a --- /dev/null +++ b/environments/helm-argo-test/secrets-kafka.yaml @@ -0,0 +1,7 @@ +# Kafka secrets — fill these in before first deploy. +kafkaConnect: + clickhouse: + username: "default" + password: "helm-argo-test-clickhouse-2026" + externalSecret: + enabled: false diff --git a/environments/helm-argo-test/secrets-mongodb.yaml b/environments/helm-argo-test/secrets-mongodb.yaml new file mode 100644 index 0000000..d2f4271 --- /dev/null +++ b/environments/helm-argo-test/secrets-mongodb.yaml @@ -0,0 +1,13 @@ +# MongoDB secrets — fill these in before first deploy. 
+users: + app: + password: "helm-argo-test-mongo-2026" + createSecret: true + externalSecret: + enabled: false + metrics: + enabled: true + password: "helm-argo-test-metrics-2026" + createSecret: true + externalSecret: + enabled: false diff --git a/environments/helm-argo-test/secrets-observability.yaml b/environments/helm-argo-test/secrets-observability.yaml new file mode 100644 index 0000000..b7ad3b1 --- /dev/null +++ b/environments/helm-argo-test/secrets-observability.yaml @@ -0,0 +1 @@ +# Observability is disabled for helm-argo-test. From 608fba055077246563d2b9ca0994deb29cf61633 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Mon, 30 Mar 2026 22:24:18 +0530 Subject: [PATCH 19/79] Fix bootstrap exclude patterns --- argocd/root-application.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/argocd/root-application.yaml b/argocd/root-application.yaml index 2dad886..6e6e80a 100644 --- a/argocd/root-application.yaml +++ b/argocd/root-application.yaml @@ -11,7 +11,7 @@ spec: path: argocd directory: recurse: true - exclude: "operator-manifests/**,customers/**" + exclude: "{operator-manifests/**,customers/**}" destination: server: https://kubernetes.default.svc namespace: argocd From c5e0de159a580634cd2ec2d177245616a8e8310e Mon Sep 17 00:00:00 2001 From: ihaardik Date: Mon, 30 Mar 2026 23:42:29 +0530 Subject: [PATCH 20/79] Make migration optional in customer appsets --- argocd/applicationsets/02-kafka.yaml | 2 + argocd/applicationsets/05-migration.yaml | 52 +++++++++++++++++++ argocd/customers/helm-argo-test.yaml | 1 + environments/helm-argo-test/migration.yaml | 1 + .../helm-argo-test/secrets-migration.yaml | 1 + environments/reference/migration.yaml | 3 ++ environments/reference/secrets-migration.yaml | 2 + scripts/new-argocd-customer.sh | 2 + 8 files changed, 64 insertions(+) create mode 100644 argocd/applicationsets/05-migration.yaml create mode 100644 environments/helm-argo-test/migration.yaml create mode 100644 
environments/helm-argo-test/secrets-migration.yaml create mode 100644 environments/reference/migration.yaml create mode 100644 environments/reference/secrets-migration.yaml diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index dbf3691..af9c0fe 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -45,6 +45,8 @@ spec: value: "{{ .observability }}" - name: global.kafkaConnect value: "{{ .kafkaConnect }}" + - name: kafkaConnect.connectors[0].enabled + value: '{{ if eq .migration "enabled" }}true{{ else }}false{{ end }}' destination: server: "{{ .server }}" namespace: kafka diff --git a/argocd/applicationsets/05-migration.yaml b/argocd/applicationsets/05-migration.yaml new file mode 100644 index 0000000..9b97e45 --- /dev/null +++ b/argocd/applicationsets/05-migration.yaml @@ -0,0 +1,52 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: countly-migration + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: deploy-test-ui + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-migration" + annotations: + argocd.argoproj.io/sync-wave: "10" + spec: + project: "{{ .project }}" + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: deploy-test-ui + path: '{{ if eq .migration "enabled" }}charts/countly-migration{{ else }}charts/noop{{ end }}' + helm: + releaseName: countly-migration + valueFiles: + - "../../environments/{{ .environment }}/global.yaml" + - "../../environments/{{ .environment }}/migration.yaml" + - "../../environments/{{ .environment }}/secrets-migration.yaml" + parameters: + - name: argocd.enabled + value: "true" + destination: + server: "{{ .server }}" + namespace: countly-migration + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + 
- ServerSideApply=true + - RespectIgnoreDifferences=true + retry: + limit: 5 + backoff: + duration: 5s + factor: 2 + maxDuration: 3m diff --git a/argocd/customers/helm-argo-test.yaml b/argocd/customers/helm-argo-test.yaml index bf53947..79c405b 100644 --- a/argocd/customers/helm-argo-test.yaml +++ b/argocd/customers/helm-argo-test.yaml @@ -7,4 +7,5 @@ security: open tls: letsencrypt observability: disabled kafkaConnect: balanced +migration: disabled secretBackend: direct diff --git a/environments/helm-argo-test/migration.yaml b/environments/helm-argo-test/migration.yaml new file mode 100644 index 0000000..6cad86c --- /dev/null +++ b/environments/helm-argo-test/migration.yaml @@ -0,0 +1 @@ +# Migration is optional and disabled for helm-argo-test. diff --git a/environments/helm-argo-test/secrets-migration.yaml b/environments/helm-argo-test/secrets-migration.yaml new file mode 100644 index 0000000..6cad86c --- /dev/null +++ b/environments/helm-argo-test/secrets-migration.yaml @@ -0,0 +1 @@ +# Migration is optional and disabled for helm-argo-test. diff --git a/environments/reference/migration.yaml b/environments/reference/migration.yaml new file mode 100644 index 0000000..6fa760c --- /dev/null +++ b/environments/reference/migration.yaml @@ -0,0 +1,3 @@ +# Migration overrides for optional countly-migration app. +# Enable per customer by setting `migration: enabled` in argocd/customers/.yaml +# and then filling this file with environment-specific overrides as needed. diff --git a/environments/reference/secrets-migration.yaml b/environments/reference/secrets-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/reference/secrets-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. 
diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index da60941..ecb72a9 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -22,6 +22,7 @@ Defaults: tls letsencrypt observability full kafkaConnect balanced + migration disabled EOF } @@ -90,6 +91,7 @@ security: open tls: letsencrypt observability: full kafkaConnect: balanced +migration: disabled EOF cat < Date: Mon, 30 Mar 2026 23:46:47 +0530 Subject: [PATCH 21/79] Disable drill connector via customer kafka values --- argocd/applicationsets/02-kafka.yaml | 2 -- environments/helm-argo-test/kafka.yaml | 45 ++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index af9c0fe..dbf3691 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -45,8 +45,6 @@ spec: value: "{{ .observability }}" - name: global.kafkaConnect value: "{{ .kafkaConnect }}" - - name: kafkaConnect.connectors[0].enabled - value: '{{ if eq .migration "enabled" }}true{{ else }}false{{ end }}' destination: server: "{{ .server }}" namespace: kafka diff --git a/environments/helm-argo-test/kafka.yaml b/environments/helm-argo-test/kafka.yaml index 2d89d18..8ade5b9 100644 --- a/environments/helm-argo-test/kafka.yaml +++ b/environments/helm-argo-test/kafka.yaml @@ -5,3 +5,48 @@ # - profiles/kafka-connect/balanced/kafka.yaml # - profiles/observability/disabled/kafka.yaml # - profiles/security/open/kafka.yaml + +kafkaConnect: + connectors: + - name: ch-sink-drill-events + enabled: false + state: running + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: 
"${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + healthcheck.interval: "10000" + dlq: {} From b0657ad88e6b2e8d5e52abf68b406aa4ef842d25 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 00:18:05 +0530 Subject: [PATCH 22/79] Remove helm-argo-test customer --- argocd/customers/helm-argo-test.yaml | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 argocd/customers/helm-argo-test.yaml diff --git a/argocd/customers/helm-argo-test.yaml b/argocd/customers/helm-argo-test.yaml deleted file mode 100644 index 79c405b..0000000 --- a/argocd/customers/helm-argo-test.yaml +++ /dev/null @@ -1,11 +0,0 @@ -customer: 
helm-argo-test -environment: helm-argo-test -project: countly-customers -server: https://34.60.110.103 -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled -secretBackend: direct From e5b382b4986ab6d32cb73e7f0750363027cb7a51 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 13:38:53 +0530 Subject: [PATCH 23/79] Add argo1 test customer --- .gitignore | 2 +- argocd/customers/argo1.yaml | 10 + environments/argo1/README.md | 69 +++ environments/argo1/clickhouse.yaml | 203 +++++++ environments/argo1/countly-tls.env | 7 + environments/argo1/countly.yaml | 570 ++++++++++++++++++ .../argo1/external-secrets.example.yaml | 36 ++ environments/argo1/global.yaml | 26 + environments/argo1/kafka.yaml | 345 +++++++++++ environments/argo1/migration.yaml | 3 + environments/argo1/mongodb.yaml | 144 +++++ environments/argo1/observability.yaml | 437 ++++++++++++++ environments/argo1/secrets-clickhouse.yaml | 4 + environments/argo1/secrets-countly.yaml | 15 + environments/argo1/secrets-kafka.yaml | 4 + environments/argo1/secrets-migration.yaml | 2 + environments/argo1/secrets-mongodb.yaml | 7 + environments/argo1/secrets-observability.yaml | 2 + environments/argo1/secrets.example.yaml | 42 ++ environments/argo1/secrets.sops.example.yaml | 21 + 20 files changed, 1948 insertions(+), 1 deletion(-) create mode 100644 argocd/customers/argo1.yaml create mode 100644 environments/argo1/README.md create mode 100644 environments/argo1/clickhouse.yaml create mode 100644 environments/argo1/countly-tls.env create mode 100644 environments/argo1/countly.yaml create mode 100644 environments/argo1/external-secrets.example.yaml create mode 100644 environments/argo1/global.yaml create mode 100644 environments/argo1/kafka.yaml create mode 100644 environments/argo1/migration.yaml create mode 100644 environments/argo1/mongodb.yaml create mode 100644 environments/argo1/observability.yaml create mode 100644 
environments/argo1/secrets-clickhouse.yaml create mode 100644 environments/argo1/secrets-countly.yaml create mode 100644 environments/argo1/secrets-kafka.yaml create mode 100644 environments/argo1/secrets-migration.yaml create mode 100644 environments/argo1/secrets-mongodb.yaml create mode 100644 environments/argo1/secrets-observability.yaml create mode 100644 environments/argo1/secrets.example.yaml create mode 100644 environments/argo1/secrets.sops.example.yaml diff --git a/.gitignore b/.gitignore index 0808a78..a125120 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,7 @@ secrets-*.yaml # Exception: reference environment templates (contain no real secrets) !environments/reference/secrets-*.yaml -!environments/helm-argo-test/secrets-*.yaml +!environments/argo1/secrets-*.yaml # Helmfile state helmfile.lock diff --git a/argocd/customers/argo1.yaml b/argocd/customers/argo1.yaml new file mode 100644 index 0000000..1b785cb --- /dev/null +++ b/argocd/customers/argo1.yaml @@ -0,0 +1,10 @@ +customer: argo1 +environment: argo1 +project: countly-customers +server: https://35.226.153.84 +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/argo1/README.md b/environments/argo1/README.md new file mode 100644 index 0000000..12b374b --- /dev/null +++ b/environments/argo1/README.md @@ -0,0 +1,69 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + +3. Fill in required secrets in the chart-specific files: + - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `clickhouse.yaml` → `auth.defaultUserPassword.password` + - `kafka.yaml` → `kafkaConnect.clickhouse.password` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `secrets-mongodb.yaml` | MongoDB user passwords | +| `secrets-clickhouse.yaml` | ClickHouse auth password | +| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | +| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/argo1/clickhouse.yaml b/environments/argo1/clickhouse.yaml new file mode 100644 index 0000000..d7899d3 --- /dev/null +++ b/environments/argo1/clickhouse.yaml @@ -0,0 +1,203 @@ +# ============================================================================= +# ClickHouse Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-clickhouse/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Operator API Version --- +clickhouseOperator: + apiVersion: clickhouse.com/v1alpha1 + +# ============================================================================= +# Cluster Topology +# ============================================================================= +version: "26.2" +shards: 1 +replicas: 2 + +# ============================================================================= +# Images +# ============================================================================= +image: + server: clickhouse/clickhouse-server + keeper: clickhouse/clickhouse-keeper + +# ============================================================================= +# Database +# ============================================================================= +database: countly_drill + +# ============================================================================= +# Authentication +# ============================================================================= +auth: + # --- Default User Password --- + defaultUserPassword: + existingSecret: "" # Use an existing secret instead of creating one + secretName: clickhouse-default-password + key: password + password: "" # REQUIRED: ClickHouse default user password + + # --- Admin User (optional, separate from default) --- + adminUser: + enabled: false + # Precomputed SHA256 hex of the admin password (64 hex chars). 
+ # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 + passwordSha256Hex: "" + +# ============================================================================= +# OpenTelemetry Server-Side Tracing +# ============================================================================= +# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for +# queries arriving with W3C traceparent headers. +opentelemetry: + enabled: false + spanLog: + ttlDays: 7 + flushIntervalMs: 1000 + +# ============================================================================= +# Server +# ============================================================================= +server: + securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" + size: 50Gi + + settings: + maxConnections: 4096 + extraConfig: "" # Raw XML injected into server config + extraUsersConfig: "" # Raw XML injected into users config + prometheus: + enabled: true + port: 9363 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Keeper (ClickHouse Keeper for replication coordination) +# ============================================================================= +keeper: + replicas: 1 + + securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "250m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "1Gi" + + persistence: + storageClass: "" + size: 5Gi + + settings: + prometheus: + enabled: true + port: 9090 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + 
tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Pod Disruption Budgets +# ============================================================================= +podDisruptionBudget: + server: + enabled: false + maxUnavailable: 1 + keeper: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + - kafka + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-analytics-namespace + # ports: + # - port: 8123 + # protocol: TCP + +# ============================================================================= +# Service Monitor (Prometheus Operator CRD) +# ============================================================================= +serviceMonitor: + enabled: false + interval: "15s" + serviceType: headless # headless = per-pod scraping, clusterIP = any-pod + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git a/environments/argo1/countly-tls.env b/environments/argo1/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/argo1/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline 
at end of file diff --git a/environments/argo1/countly.yaml b/environments/argo1/countly.yaml new file mode 100644 index 0000000..4ba46fa --- /dev/null +++ b/environments/argo1/countly.yaml @@ -0,0 +1,570 @@ +# ============================================================================= +# Countly Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Service Account --- +serviceAccount: + create: true + name: "" # Auto-derived from release name if empty + annotations: {} + +# --- Image --- +image: + repository: gcr.io/countly-dev-313620/countly-unified + digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" + tag: "26.01" # Fallback when digest is empty + pullPolicy: IfNotPresent + +# --- Cross-Namespace References --- +clickhouseNamespace: clickhouse +kafkaNamespace: kafka +mongodbNamespace: mongodb + +# ============================================================================= +# Component: API +# ============================================================================= +api: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:api"] + port: 3001 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "1" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 1 + 
maxReplicas: 6 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 # 1-100, only used with type=preferred + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Frontend +# ============================================================================= +frontend: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:frontend"] + port: 6001 + healthCheck: + path: /ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "2.5Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + memory: {} + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Ingestor +# ============================================================================= +ingestor: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:ingestor"] + port: 3010 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 12 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 75 + behavior: {} + pdb: + 
enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Aggregator +# ============================================================================= +aggregator: + enabled: true + replicaCount: 4 + command: ["npm", "run", "start:aggregator"] + port: 0 # No HTTP port exposed + healthCheck: {} # No HTTP health check (no port) + terminationGracePeriodSeconds: 60 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "2" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 4 + maxReplicas: 8 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 65 + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 10 + periodSeconds: 60 + pdb: + enabled: true + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Job Server +# ============================================================================= +jobserver: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:jobserver"] + port: 3020 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + 
memory: + averageUtilization: 85 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: false + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Configuration (ConfigMaps) +# ============================================================================= +config: + # --- Common (shared by all components) --- + common: + NODE_ENV: production + COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" + COUNTLY_CONFIG__FILESTORAGE: gridfs + COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse + COUNTLY_CONFIG__SHARED_CONNECTION: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' + COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" + COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' + COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" + + # --- API --- + api: + COUNTLY_CONTAINER: api + COUNTLY_CONFIG__API_PORT: "3001" + COUNTLY_CONFIG__API_HOST: "0.0.0.0" + COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" + COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB + COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms + + # --- Frontend --- + frontend: + COUNTLY_CONTAINER: frontend + COUNTLY_CONFIG__WEB_PORT: "6001" + COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" + 
COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" + COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms + + # --- Ingestor --- + ingestor: + COUNTLY_CONTAINER: ingestor + COUNTLY_CONFIG__INGESTOR_PORT: "3010" + COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" + + # --- Aggregator --- + aggregator: + COUNTLY_CONTAINER: aggregator + UV_THREADPOOL_SIZE: "6" + + # --- Job Server --- + jobserver: + COUNTLY_CONTAINER: jobserver + COUNTLY_CONFIG__JOBSERVER_PORT: "3020" + COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" + + # --- ClickHouse Connection --- + clickhouse: + COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" + COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms + COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" + COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: "true" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" + 
COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" + + # --- Kafka Connection --- + kafka: + COUNTLY_CONFIG__KAFKA_ENABLED: "true" + COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events + COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka + COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" + COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" + COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms + COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" + COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app + COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" + COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" + COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" + COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" + COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" + COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest + COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" + COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" + COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" + + # --- OpenTelemetry --- + otel: + OTEL_ENABLED: "false" + OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" + OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" + OTEL_TRACES_SAMPLER: "parentbased_traceidratio" + OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample + PYROSCOPE_ENABLED: "false" + +# --- Node.js Options (injected into configmap per component) --- +nodeOptions: + api: "--max-old-space-size=3072 --max-semi-space-size=256" + frontend: "--max-old-space-size=2048" + ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" + aggregator: "--max-old-space-size=3072 
--max-semi-space-size=128" + jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" + +# ============================================================================= +# Backing Service Modes +# ============================================================================= +# When mode=external, the corresponding chart is not deployed and connection +# details below are used instead. + +backingServices: + mongodb: + mode: bundled # bundled | external + host: "" + port: "27017" + connectionString: "" # If set, used as-is (bypasses host/port/user/pass) + username: "app" + password: "" + database: "admin" + replicaSet: "" + existingSecret: "" + # --- External MongoDB Atlas example --- + # mode: external + # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" + + clickhouse: + mode: bundled # bundled | external + host: "" + port: "8123" + tls: "false" + username: "default" + password: "" + database: "countly_drill" + existingSecret: "" + # --- External ClickHouse Cloud example --- + # mode: external + # host: "abc123.us-east-1.aws.clickhouse.cloud" + # port: "8443" + # tls: "true" + + kafka: + mode: bundled # bundled | external + brokers: "" # Comma-separated broker list + securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL + saslMechanism: "" + saslUsername: "" + saslPassword: "" + existingSecret: "" + # --- External Confluent Cloud example --- + # mode: external + # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" + # securityProtocol: "SASL_SSL" + # saslMechanism: "PLAIN" + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + mode: values # values | existingSecret | externalSecret + keep: true # Retain secrets on helm uninstall + rotationId: "" # Change to force secret re-creation + + common: + existingSecret: "" + encryptionReportsKey: "" # 
REQUIRED: min 8 chars + webSessionSecret: "" # REQUIRED: min 8 chars + passwordSecret: "" # REQUIRED: min 8 chars + + clickhouse: + existingSecret: "" + username: "" + password: "" + database: "" + + kafka: + existingSecret: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + + mongodb: + existingSecret: "" + key: "connectionString.standard" # Key within the secret to read + password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) + + # --- ExternalSecret configuration (used only when mode=externalSecret) --- + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: "" + webSessionSecret: "" + passwordSecret: "" + clickhouse: + url: "" + username: "" + password: "" + database: "" + kafka: + brokers: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + mongodb: + connectionString: "" + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + ingressNamespaceSelector: + kubernetes.io/metadata.name: ingress-nginx + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-custom-namespace + # ports: + # - port: 3001 + # protocol: TCP + +# ============================================================================= +# Ingress +# ============================================================================= +ingress: + enabled: true + className: nginx + annotations: + # F5 NGINX Ingress Controller (OSS) annotations + nginx.org/client-max-body-size: "50m" + nginx.org/proxy-buffering: "True" + nginx.org/proxy-buffer-size: "256k" + nginx.org/proxy-buffers: "16 256k" + nginx.org/proxy-busy-buffers-size: "512k" + nginx.org/proxy-max-temp-file-size: "2048m" + 
nginx.org/client-body-buffer-size: "2m" + nginx.org/proxy-connect-timeout: "60s" + nginx.org/proxy-read-timeout: "120s" + nginx.org/proxy-send-timeout: "120s" + nginx.org/keepalive: "256" + nginx.org/server-snippets: | + otel_trace on; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_set_header Connection ""; + proxy_set_header X-Request-ID $request_id; + proxy_set_header X-Request-Start $msec; + # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) + client_header_timeout 30s; + nginx.org/location-snippets: | + proxy_request_buffering on; + proxy_next_upstream error timeout http_502 http_503 http_504; + proxy_next_upstream_timeout 30s; + proxy_next_upstream_tries 3; + proxy_temp_file_write_size 1m; + client_body_timeout 120s; + hostname: countly.example.com + tls: + # TLS mode: letsencrypt | existingSecret | selfSigned | http + # http: No TLS + # letsencrypt: cert-manager + Let's Encrypt (recommended for production) + # existingSecret: Bring your own TLS secret + # selfSigned: cert-manager self-signed CA (for development) + mode: http + clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt + secretName: "" # Auto-derived if empty: -tls + selfSigned: + issuerName: "" # Auto-derived if empty: -ca-issuer + caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/argo1/external-secrets.example.yaml b/environments/argo1/external-secrets.example.yaml new file mode 100644 index 0000000..7bf93ef --- /dev/null +++ b/environments/argo1/external-secrets.example.yaml @@ -0,0 +1,36 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in 
environments/<customer>/countly.yaml:
+#
+# secrets:
+#   mode: externalSecret
+#   externalSecret:
+#     refreshInterval: "1h"
+#     secretStoreRef:
+#       name: my-secret-store
+#       kind: ClusterSecretStore
+#     remoteRefs:
+#       common:
+#         encryptionReportsKey: "countly/encryption-reports-key"
+#         webSessionSecret: "countly/web-session-secret"
+#         passwordSecret: "countly/password-secret"
+#       clickhouse:
+#         url: "countly/clickhouse-url"
+#         username: "countly/clickhouse-username"
+#         password: "countly/clickhouse-password"
+#         database: "countly/clickhouse-database"
+#       kafka:
+#         brokers: "countly/kafka-brokers"
+#         securityProtocol: "countly/kafka-security-protocol"
+#       mongodb:
+#         connectionString: "countly/mongodb-connection-string"
+#
+# Prerequisites:
+# 1. Install External Secrets Operator: https://external-secrets.io/
+# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend
+# 3. Ensure the ESO service account has access to the referenced secrets
+#
+# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager,
+# Azure Key Vault, and many more. 
diff --git a/environments/argo1/global.yaml b/environments/argo1/global.yaml new file mode 100644 index 0000000..c3bf726 --- /dev/null +++ b/environments/argo1/global.yaml @@ -0,0 +1,26 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + storageClass: "" + imagePullSecrets: [] + +ingress: + hostname: argo1.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/argo1/kafka.yaml b/environments/argo1/kafka.yaml new file mode 100644 index 0000000..519c826 --- /dev/null +++ b/environments/argo1/kafka.yaml @@ -0,0 +1,345 @@ +# ============================================================================= +# Kafka Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-kafka/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Strimzi Operator API Version --- +strimzi: + apiVersion: kafka.strimzi.io/v1 + +# --- Kafka Version --- +version: "4.2.0" + +# ============================================================================= +# Brokers +# ============================================================================= +brokers: + replicas: 3 + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "1" + memory: "4Gi" + jvmOptions: + xms: "2g" + xmx: "2g" + + # --- Persistence --- + persistence: + volumes: + - id: 0 + size: 100Gi + storageClass: "" + deleteClaim: false # Delete PVC when broker is removed + + # --- Broker Config --- + config: + default.replication.factor: 2 + min.insync.replicas: 2 + log.retention.hours: 168 # 7 days + log.segment.bytes: "1073741824" # 1 GiB + compression.type: lz4 + auto.create.topics.enable: false + offsets.topic.replication.factor: 2 + num.partitions: 24 + transaction.state.log.replication.factor: 2 + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Controllers (KRaft) +# ============================================================================= +controllers: + replicas: 3 + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + + persistence: + size: 20Gi + storageClass: "" + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + 
topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Listeners +# ============================================================================= +listeners: + - name: internal + port: 9092 + type: internal + tls: false + +# ============================================================================= +# Cruise Control +# ============================================================================= +cruiseControl: + enabled: true + resources: + requests: + cpu: "1" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + jvmOptions: + xms: "1g" + xmx: "2g" + autoRebalance: + - mode: add-brokers + - mode: remove-brokers + +# ============================================================================= +# Kafka Connect (ClickHouse Sink) +# ============================================================================= +kafkaConnect: + enabled: true + name: connect-ch + image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + replicas: 2 + bootstrapServers: "" # Auto-derived from cluster if empty + + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + jvmOptions: + xms: "5g" + xmx: "5g" + + # --- Worker Configuration --- + workerConfig: + group.id: connect-ch + config.storage.topic: connect_ch_configs + offset.storage.topic: connect_ch_offsets + status.storage.topic: connect_ch_status + config.storage.replication.factor: 2 + offset.storage.replication.factor: 2 + status.storage.replication.factor: 2 + offset.storage.partitions: 25 + status.storage.partitions: 5 + key.converter: org.apache.kafka.connect.storage.StringConverter + value.converter: org.apache.kafka.connect.json.JsonConverter + value.converter.schemas.enable: "false" + connector.client.config.override.policy: All + config.providers: env + config.providers.env.class: 
org.apache.kafka.common.config.provider.EnvVarConfigProvider + + # --- ClickHouse Connection (for the sink connector) --- + clickhouse: + existingSecret: "" + secretName: clickhouse-auth + host: "" # Auto-derived from clickhouseNamespace if empty + port: "8123" + ssl: "false" + database: "countly_drill" + username: "default" + password: "" # REQUIRED: must match ClickHouse default user password + + # --- Environment Variables (injected into Connect pods) --- + env: + EXACTLY_ONCE: "false" + ERRORS_RETRY_TIMEOUT: "300" + ERRORS_TOLERANCE: "none" # none | all + CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" + BYPASS_ROW_BINARY: "false" + TABLE_REFRESH_INTERVAL: "300" # seconds + KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + VALUE_CONVERTER_SCHEMAS_ENABLE: "false" + KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 32 MiB + KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" + KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" + KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB + KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB + KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" + KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" + KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" + KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" + + # --- HPA --- + hpa: + enabled: false + minReplicas: 1 + maxReplicas: 3 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: + scaleUp: + stabilizationWindowSeconds: 120 + policies: + - type: Percent + value: 50 + periodSeconds: 120 + - type: Pods + value: 2 + periodSeconds: 120 + 
selectPolicy: Min + scaleDown: + stabilizationWindowSeconds: 600 + policies: + - type: Percent + value: 25 + periodSeconds: 300 + - type: Pods + value: 1 + periodSeconds: 300 + selectPolicy: Min + + # --- OpenTelemetry Java Agent --- + # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. + # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer + # and outbound HTTP (ClickHouse sink) span creation. + otel: + enabled: false + serviceName: "kafka-connect" + exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" + exporterProtocol: "grpc" + sampler: "parentbased_traceidratio" + samplerArg: "1.0" + resourceAttributes: "" # e.g. "deployment.environment=production,k8s.cluster.name=my-cluster" + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- Connectors --- + connectors: + - name: ch-sink-drill-events + enabled: true + state: running # running | paused | stopped + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: "${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + 
consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + healthcheck.interval: "10000" + dlq: {} # Dead-letter queue config (empty = disabled) + +# ============================================================================= +# Metrics +# ============================================================================= +metrics: + enabled: true + +# --- Cross-Namespace Reference --- +clickhouseNamespace: clickhouse + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 9092 + # protocol: TCP + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain 
secrets on helm uninstall +# Kafka overrides for argo1 +# +# Migration is disabled for this customer, so the drill ClickHouse sink +# connector must stay off to avoid waiting on migration-owned tables. + +kafkaConnect: + connectors: + - name: ch-sink-drill-events + enabled: false diff --git a/environments/argo1/migration.yaml b/environments/argo1/migration.yaml new file mode 100644 index 0000000..6fa760c --- /dev/null +++ b/environments/argo1/migration.yaml @@ -0,0 +1,3 @@ +# Migration overrides for optional countly-migration app. +# Enable per customer by setting `migration: enabled` in argocd/customers/.yaml +# and then filling this file with environment-specific overrides as needed. diff --git a/environments/argo1/mongodb.yaml b/environments/argo1/mongodb.yaml new file mode 100644 index 0000000..31f230e --- /dev/null +++ b/environments/argo1/mongodb.yaml @@ -0,0 +1,144 @@ +# ============================================================================= +# MongoDB Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-mongodb/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# ============================================================================= +# MongoDB Server +# ============================================================================= +mongodb: + version: "8.2.5" + members: 2 # Replica set member count + + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" # Overrides global.storageClass for MongoDB PVCs + size: 100Gi + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- TLS --- + tls: + enabled: false + +# ============================================================================= +# Users +# ============================================================================= +users: + # --- Application User --- + app: + name: app + database: admin + roles: + - name: readWriteAnyDatabase + db: admin + - name: dbAdmin + db: admin + passwordSecretName: app-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + + # --- Metrics Exporter User --- + metrics: + enabled: true + name: metrics + database: admin + roles: + - name: clusterMonitor + db: admin + - name: read + db: local + passwordSecretName: metrics-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + +# ============================================================================= +# Prometheus Exporter +# ============================================================================= 
+exporter: + enabled: true + image: percona/mongodb_exporter:0.40.0 + port: 9216 + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "200m" + memory: "256Mi" + service: + enabled: true + args: + - --collect-all + - --collector.diagnosticdata + - --collector.replicasetstatus + - --collector.dbstats + - --collector.topmetrics + - --collector.indexstats + - --collector.collstats + +# ============================================================================= +# Pod Disruption Budget +# ============================================================================= +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 27017 + # protocol: TCP + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git a/environments/argo1/observability.yaml b/environments/argo1/observability.yaml new file mode 100644 index 0000000..79a4b39 --- /dev/null +++ b/environments/argo1/observability.yaml @@ -0,0 +1,437 @@ +# ============================================================================= +# Observability Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-observability/values.yaml is listed here. 
+# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Deployment Mode --- +# full — All backends + Grafana in-cluster +# hybrid — All backends in-cluster, no Grafana (use external Grafana) +# external — Collectors only, forward to external endpoints +# disabled — Set in global.yaml observability.mode to skip this chart entirely +mode: full + +# --- Cluster Name (injected into Prometheus external_labels) --- +clusterName: countly-local + +# --- Cross-Namespace References --- +countlyNamespace: countly +clickhouseNamespace: clickhouse +mongodbNamespace: mongodb +kafkaNamespace: kafka +ingressNamespace: ingress-nginx +certManagerNamespace: cert-manager +clickhouseOperatorNamespace: clickhouse-operator-system + +# --- NGINX Ingress Controller Scrape Configuration --- +nginxIngress: + podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" + metricsPort: "9113" + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +# ============================================================================= +# Per-Signal Configuration +# ============================================================================= + +# --- Metrics --- +metrics: + enabled: true + sampling: + interval: "15s" # Global Prometheus scrape_interval + +# --- Traces --- +traces: + enabled: true + sampling: + strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased + ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased + tailSampling: # Only used when strategy == TailBased + waitDuration: "10s" + numTraces: 50000 + policies: + keepErrors: true + latencyThresholdMs: 2000 + baselineRatio: 0.1 + +# --- Logs --- +logs: + enabled: true + sampling: + enabled: false + dropRate: 
0 # 0.0-1.0, fraction of logs to drop + +# --- Profiling --- +profiling: + enabled: true + sampling: + rate: "100" # Advisory — used in NOTES.txt for SDK config + +# ============================================================================= +# Prometheus +# ============================================================================= +prometheus: + image: + repository: prom/prometheus + tag: "v3.10.0" + retention: + time: "30d" + size: "50GB" + storage: + size: 100Gi + storageClass: "" + resources: + requests: + cpu: "2" + memory: "3Gi" + limits: + cpu: "2" + memory: "4Gi" + extraArgs: [] + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. https://prom.corp.com/api/v1/write) + +# ============================================================================= +# Loki +# ============================================================================= +loki: + image: + repository: grafana/loki + tag: "3.6.7" + retention: "30d" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure + size: 100Gi + storageClass: "" + # Object storage settings (only used when backend != filesystem) + bucket: "" # Bucket/container name (REQUIRED for object backends) + endpoint: "" # Custom endpoint (e.g. 
MinIO: http://minio:9000) + region: "" # Cloud region (S3) + insecure: false # Use HTTP instead of HTTPS + forcePathStyle: false # S3 path-style access (required for MinIO) + # Credential file secret (for GCS JSON key files) + existingSecret: "" # K8s Secret name to mount + secretKey: "key.json" # Key within the Secret + secretMountPath: "/var/secrets/storage" + # Env-based credentials (for AWS access keys, Azure account keys) + envFromSecret: "" # K8s Secret name to inject as env vars + # Provider-specific passthrough (rendered directly into provider block) + config: {} + # + # --- Object storage examples (apply to loki, tempo, and pyroscope) --- + # + # --- AWS S3 example --- + # backend: s3 + # s3: + # bucket: my-loki-data + # region: us-east-1 + # endpoint: "" + # insecure: false + # forcePathStyle: false + # credentials: + # source: envFromSecret + # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY + # + # --- GCS example --- + # backend: gcs + # gcs: + # bucket: my-loki-data + # credentials: + # source: existingSecret + # existingSecret: loki-gcs-key + # secretKey: "key.json" + # + # --- MinIO example --- + # backend: s3 + # s3: + # bucket: loki + # endpoint: minio.storage.svc.cluster.local:9000 + # insecure: true + # forcePathStyle: true + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + config: + maxStreamsPerUser: 30000 + maxLineSize: 256000 + ingestionRateMb: 64 + ingestionBurstSizeMb: 128 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + pushUrl: "" # Full Loki push URL (e.g. 
https://loki.corp.com/loki/api/v1/push) + +# ============================================================================= +# Tempo +# ============================================================================= +tempo: + image: + repository: grafana/tempo + tag: "2.10.1" + retention: "12h" + storage: + backend: "local" # local | s3 | gcs | azure + size: 150Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "3" + memory: "6Gi" + limits: + cpu: "4" + memory: "10Gi" + config: + ingestionRateLimitBytes: 100000000 + ingestionBurstSizeBytes: 150000000 + maxTracesPerUser: 50000 + maxBytesPerTrace: 5000000 + maxRecvMsgSizeMiB: 16 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. tempo.corp.com:4317) + otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) + +# ============================================================================= +# Pyroscope +# ============================================================================= +pyroscope: + image: + repository: grafana/pyroscope + tag: "1.18.1" + retention: "72h" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure | swift + size: 20Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + ingestUrl: "" # Pyroscope ingest URL (e.g. 
https://pyroscope.corp.com) + +# ============================================================================= +# Grafana +# ============================================================================= +grafana: + enabled: true # Only deployed when mode == "full" + image: + repository: grafana/grafana + tag: "12.4.0" + admin: + existingSecret: "" # Use an existing Secret for admin credentials + userKey: "admin-user" + passwordKey: "admin-password" + persistence: + enabled: false # Ephemeral by default (declarative config, no state to lose) + size: 10Gi + storageClass: "" + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + plugins: + install: "grafana-pyroscope-datasource" + featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" + dashboards: + enabled: true + overview: true + platform: true + countly: true + data: true + edge: true + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + url: "" # External Grafana URL (for NOTES.txt only) + +# ============================================================================= +# Alloy (DaemonSet — log collection) +# ============================================================================= +alloy: + image: + repository: grafana/alloy + tag: "v1.13.2" + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + scheduling: + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + +# ============================================================================= +# Alloy-OTLP (Deployment — OTLP traces + profiling receive) +# ============================================================================= +alloyOtlp: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + memoryLimiter: + limit: "1600MiB" # Must be < resources.limits.memory + spikeLimit: 
"400MiB" + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# Alloy-Metrics (Deployment — ALL Prometheus scraping) +# ============================================================================= +alloyMetrics: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "512Mi" + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# kube-state-metrics +# ============================================================================= +kubeStateMetrics: + enabled: true + image: + repository: registry.k8s.io/kube-state-metrics/kube-state-metrics + tag: "v2.18.0" + resources: + requests: + cpu: "10m" + memory: "32Mi" + limits: + cpu: "100m" + memory: "256Mi" + namespaces: + - countly + - observability + - ingress-nginx + - kube-system + - clickhouse + - mongodb + - kafka + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# node-exporter +# ============================================================================= +nodeExporter: + enabled: true + image: + repository: prom/node-exporter + tag: "v1.10.2" + resources: + requests: + cpu: "100m" + memory: "180Mi" + limits: + cpu: "250m" + memory: "300Mi" + +# ============================================================================= +# Ingress (for Grafana) +# ============================================================================= +ingress: + enabled: false + className: nginx + annotations: {} + hosts: + - host: obs.example.com + tls: [] + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: 
+ enabled: false + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 4318 + # protocol: TCP diff --git a/environments/argo1/secrets-clickhouse.yaml b/environments/argo1/secrets-clickhouse.yaml new file mode 100644 index 0000000..653806f --- /dev/null +++ b/environments/argo1/secrets-clickhouse.yaml @@ -0,0 +1,4 @@ +# ClickHouse direct secrets for argo1 +auth: + defaultUserPassword: + password: "argo1-clickhouse-2026" diff --git a/environments/argo1/secrets-countly.yaml b/environments/argo1/secrets-countly.yaml new file mode 100644 index 0000000..d971003 --- /dev/null +++ b/environments/argo1/secrets-countly.yaml @@ -0,0 +1,15 @@ +# Countly direct secrets for argo1 +secrets: + mode: values + common: + encryptionReportsKey: "argo1-reports-key-2026" + webSessionSecret: "argo1-web-session-2026" + passwordSecret: "argo1-password-secret-2026" + clickhouse: + username: "default" + password: "argo1-clickhouse-2026" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + mongodb: + password: "argo1-mongo-2026" diff --git a/environments/argo1/secrets-kafka.yaml b/environments/argo1/secrets-kafka.yaml new file mode 100644 index 0000000..3afbe21 --- /dev/null +++ b/environments/argo1/secrets-kafka.yaml @@ -0,0 +1,4 @@ +# Kafka direct secrets for argo1 +kafkaConnect: + clickhouse: + password: "argo1-clickhouse-2026" diff --git a/environments/argo1/secrets-migration.yaml b/environments/argo1/secrets-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/argo1/secrets-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/argo1/secrets-mongodb.yaml b/environments/argo1/secrets-mongodb.yaml new file mode 100644 index 0000000..d0e245d --- /dev/null +++ b/environments/argo1/secrets-mongodb.yaml @@ -0,0 +1,7 @@ +# MongoDB direct secrets for argo1 +users: + app: + password: "argo1-mongo-2026" + metrics: + enabled: true + password: "argo1-metrics-2026" diff --git a/environments/argo1/secrets-observability.yaml b/environments/argo1/secrets-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/argo1/secrets-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/argo1/secrets.example.yaml b/environments/argo1/secrets.example.yaml new file mode 100644 index 0000000..282eb0d --- /dev/null +++ b/environments/argo1/secrets.example.yaml @@ -0,0 +1,42 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). 
+# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//secrets-countly.yaml) --- +secrets: + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +users: + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" diff --git a/environments/argo1/secrets.sops.example.yaml b/environments/argo1/secrets.sops.example.yaml new file mode 100644 index 0000000..9b652d1 --- /dev/null +++ b/environments/argo1/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//secrets-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//secrets-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as 
secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). +# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From a6aecbba7ff7fd712444622b53a8e1487f0b679a Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 13:43:19 +0530 Subject: [PATCH 24/79] Fix argo1 kafka connector override --- environments/argo1/kafka.yaml | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/environments/argo1/kafka.yaml b/environments/argo1/kafka.yaml index 519c826..f4edaab 100644 --- a/environments/argo1/kafka.yaml +++ b/environments/argo1/kafka.yaml @@ -343,3 +343,43 @@ kafkaConnect: connectors: - name: ch-sink-drill-events enabled: false + state: running + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: "${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: 
"${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + healthcheck.interval: "10000" + dlq: {} From 33f3d11c466f7cda8398e15260c1b565be92a9c5 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 13:57:15 +0530 Subject: [PATCH 25/79] Map Countly hostname and TLS from customer metadata --- argocd/README.md | 13 ++++++++++++- argocd/applicationsets/03-countly.yaml | 4 ++++ argocd/customers/argo1.yaml | 1 + environments/argo1/countly.yaml | 4 ++-- scripts/new-argocd-customer.sh | 1 + 5 files changed, 20 insertions(+), 3 deletions(-) diff --git a/argocd/README.md b/argocd/README.md index 1f59684..b8d971f 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -22,8 +22,9 @@ For the initial rollout, ArgoCD is scoped to one customer metadata file: 3. Create or update one customer metadata file in: - `argocd/customers/.yaml` - The `server:` value must match the cluster entry registered in ArgoCD. + - Set `hostname:` to the customer domain. 4. Replace the environment hostname in: - - `environments/helm-argocd/global.yaml` + - `environments//global.yaml` 5. Populate the direct values in the customer `secrets-*.yaml` files before the first deploy. 6. Configure ArgoCD custom health checks for MongoDB, ClickHouse, and Strimzi CRs. 
@@ -61,3 +62,13 @@ kubectl apply -f argocd/root-application.yaml -n argocd Only two Git-managed inputs are required per new customer: - `environments//` - `argocd/customers/.yaml` + +Customer metadata is the source of truth for: +- `server` +- `hostname` +- `sizing` +- `security` +- `tls` +- `observability` +- `kafkaConnect` +- `migration` diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index 16ef359..328fbad 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -37,6 +37,10 @@ spec: parameters: - name: argocd.enabled value: "true" + - name: ingress.hostname + value: "{{ .hostname }}" + - name: ingress.tls.mode + value: '{{ if eq .tls "none" }}http{{ else if eq .tls "provided" }}existingSecret{{ else }}{{ .tls }}{{ end }}' - name: global.sizing value: "{{ .sizing }}" - name: global.security diff --git a/argocd/customers/argo1.yaml b/argocd/customers/argo1.yaml index 1b785cb..2913516 100644 --- a/argocd/customers/argo1.yaml +++ b/argocd/customers/argo1.yaml @@ -2,6 +2,7 @@ customer: argo1 environment: argo1 project: countly-customers server: https://35.226.153.84 +hostname: argo1.count.ly sizing: tier1 security: open tls: letsencrypt diff --git a/environments/argo1/countly.yaml b/environments/argo1/countly.yaml index 4ba46fa..5c6ddfa 100644 --- a/environments/argo1/countly.yaml +++ b/environments/argo1/countly.yaml @@ -555,14 +555,14 @@ ingress: proxy_next_upstream_tries 3; proxy_temp_file_write_size 1m; client_body_timeout 120s; - hostname: countly.example.com + hostname: argo1.count.ly tls: # TLS mode: letsencrypt | existingSecret | selfSigned | http # http: No TLS # letsencrypt: cert-manager + Let's Encrypt (recommended for production) # existingSecret: Bring your own TLS secret # selfSigned: cert-manager self-signed CA (for development) - mode: http + mode: letsencrypt clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt secretName: "" # Auto-derived if empty: -tls 
selfSigned: diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index ecb72a9..cf7854f 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -86,6 +86,7 @@ customer: ${customer} environment: ${customer} project: ${project} server: ${server} +hostname: ${hostname} sizing: production security: open tls: letsencrypt From c70dcbb10ea596ec083ea1d8dc63ec7cb641d418 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 14:09:19 +0530 Subject: [PATCH 26/79] Add argo2 test customer --- .gitignore | 1 + argocd/customers/argo2.yaml | 11 + environments/argo2/README.md | 69 +++ environments/argo2/clickhouse.yaml | 203 +++++++ environments/argo2/countly-tls.env | 7 + environments/argo2/countly.yaml | 570 ++++++++++++++++++ .../argo2/external-secrets.example.yaml | 36 ++ environments/argo2/global.yaml | 26 + environments/argo2/kafka.yaml | 384 ++++++++++++ environments/argo2/migration.yaml | 3 + environments/argo2/mongodb.yaml | 144 +++++ environments/argo2/observability.yaml | 437 ++++++++++++++ environments/argo2/secrets-clickhouse.yaml | 4 + environments/argo2/secrets-countly.yaml | 15 + environments/argo2/secrets-kafka.yaml | 4 + environments/argo2/secrets-migration.yaml | 2 + environments/argo2/secrets-mongodb.yaml | 7 + environments/argo2/secrets-observability.yaml | 2 + environments/argo2/secrets.example.yaml | 42 ++ environments/argo2/secrets.sops.example.yaml | 21 + 20 files changed, 1988 insertions(+) create mode 100644 argocd/customers/argo2.yaml create mode 100644 environments/argo2/README.md create mode 100644 environments/argo2/clickhouse.yaml create mode 100644 environments/argo2/countly-tls.env create mode 100644 environments/argo2/countly.yaml create mode 100644 environments/argo2/external-secrets.example.yaml create mode 100644 environments/argo2/global.yaml create mode 100644 environments/argo2/kafka.yaml create mode 100644 environments/argo2/migration.yaml create mode 100644 
environments/argo2/mongodb.yaml create mode 100644 environments/argo2/observability.yaml create mode 100644 environments/argo2/secrets-clickhouse.yaml create mode 100644 environments/argo2/secrets-countly.yaml create mode 100644 environments/argo2/secrets-kafka.yaml create mode 100644 environments/argo2/secrets-migration.yaml create mode 100644 environments/argo2/secrets-mongodb.yaml create mode 100644 environments/argo2/secrets-observability.yaml create mode 100644 environments/argo2/secrets.example.yaml create mode 100644 environments/argo2/secrets.sops.example.yaml diff --git a/.gitignore b/.gitignore index a125120..65d866f 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ secrets-*.yaml # Exception: reference environment templates (contain no real secrets) !environments/reference/secrets-*.yaml !environments/argo1/secrets-*.yaml +!environments/argo2/secrets-*.yaml # Helmfile state helmfile.lock diff --git a/argocd/customers/argo2.yaml b/argocd/customers/argo2.yaml new file mode 100644 index 0000000..fa992c3 --- /dev/null +++ b/argocd/customers/argo2.yaml @@ -0,0 +1,11 @@ +customer: argo2 +environment: argo2 +project: countly-customers +server: https://34.60.146.65 +hostname: argo2.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/argo2/README.md b/environments/argo2/README.md new file mode 100644 index 0000000..12b374b --- /dev/null +++ b/environments/argo2/README.md @@ -0,0 +1,69 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + +3. Fill in required secrets in the chart-specific files: + - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `clickhouse.yaml` → `auth.defaultUserPassword.password` + - `kafka.yaml` → `kafkaConnect.clickhouse.password` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `secrets-mongodb.yaml` | MongoDB user passwords | +| `secrets-clickhouse.yaml` | ClickHouse auth password | +| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | +| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/argo2/clickhouse.yaml b/environments/argo2/clickhouse.yaml new file mode 100644 index 0000000..d7899d3 --- /dev/null +++ b/environments/argo2/clickhouse.yaml @@ -0,0 +1,203 @@ +# ============================================================================= +# ClickHouse Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-clickhouse/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Operator API Version --- +clickhouseOperator: + apiVersion: clickhouse.com/v1alpha1 + +# ============================================================================= +# Cluster Topology +# ============================================================================= +version: "26.2" +shards: 1 +replicas: 2 + +# ============================================================================= +# Images +# ============================================================================= +image: + server: clickhouse/clickhouse-server + keeper: clickhouse/clickhouse-keeper + +# ============================================================================= +# Database +# ============================================================================= +database: countly_drill + +# ============================================================================= +# Authentication +# ============================================================================= +auth: + # --- Default User Password --- + defaultUserPassword: + existingSecret: "" # Use an existing secret instead of creating one + secretName: clickhouse-default-password + key: password + password: "" # REQUIRED: ClickHouse default user password + + # --- Admin User (optional, separate from default) --- + adminUser: + enabled: false + # Precomputed SHA256 hex of the admin password (64 hex chars). 
+ # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 + passwordSha256Hex: "" + +# ============================================================================= +# OpenTelemetry Server-Side Tracing +# ============================================================================= +# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for +# queries arriving with W3C traceparent headers. +opentelemetry: + enabled: false + spanLog: + ttlDays: 7 + flushIntervalMs: 1000 + +# ============================================================================= +# Server +# ============================================================================= +server: + securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" + size: 50Gi + + settings: + maxConnections: 4096 + extraConfig: "" # Raw XML injected into server config + extraUsersConfig: "" # Raw XML injected into users config + prometheus: + enabled: true + port: 9363 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Keeper (ClickHouse Keeper for replication coordination) +# ============================================================================= +keeper: + replicas: 1 + + securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "250m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "1Gi" + + persistence: + storageClass: "" + size: 5Gi + + settings: + prometheus: + enabled: true + port: 9090 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + 
tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Pod Disruption Budgets +# ============================================================================= +podDisruptionBudget: + server: + enabled: false + maxUnavailable: 1 + keeper: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + - kafka + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-analytics-namespace + # ports: + # - port: 8123 + # protocol: TCP + +# ============================================================================= +# Service Monitor (Prometheus Operator CRD) +# ============================================================================= +serviceMonitor: + enabled: false + interval: "15s" + serviceType: headless # headless = per-pod scraping, clusterIP = any-pod + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git a/environments/argo2/countly-tls.env b/environments/argo2/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/argo2/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline 
at end of file diff --git a/environments/argo2/countly.yaml b/environments/argo2/countly.yaml new file mode 100644 index 0000000..4ba46fa --- /dev/null +++ b/environments/argo2/countly.yaml @@ -0,0 +1,570 @@ +# ============================================================================= +# Countly Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Service Account --- +serviceAccount: + create: true + name: "" # Auto-derived from release name if empty + annotations: {} + +# --- Image --- +image: + repository: gcr.io/countly-dev-313620/countly-unified + digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" + tag: "26.01" # Fallback when digest is empty + pullPolicy: IfNotPresent + +# --- Cross-Namespace References --- +clickhouseNamespace: clickhouse +kafkaNamespace: kafka +mongodbNamespace: mongodb + +# ============================================================================= +# Component: API +# ============================================================================= +api: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:api"] + port: 3001 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "1" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 1 + 
maxReplicas: 6 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 # 1-100, only used with type=preferred + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Frontend +# ============================================================================= +frontend: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:frontend"] + port: 6001 + healthCheck: + path: /ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "2.5Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + memory: {} + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Ingestor +# ============================================================================= +ingestor: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:ingestor"] + port: 3010 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 12 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 75 + behavior: {} + pdb: + 
enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Aggregator +# ============================================================================= +aggregator: + enabled: true + replicaCount: 4 + command: ["npm", "run", "start:aggregator"] + port: 0 # No HTTP port exposed + healthCheck: {} # No HTTP health check (no port) + terminationGracePeriodSeconds: 60 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "2" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 4 + maxReplicas: 8 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 65 + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 10 + periodSeconds: 60 + pdb: + enabled: true + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Job Server +# ============================================================================= +jobserver: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:jobserver"] + port: 3020 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + 
memory: + averageUtilization: 85 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: false + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Configuration (ConfigMaps) +# ============================================================================= +config: + # --- Common (shared by all components) --- + common: + NODE_ENV: production + COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" + COUNTLY_CONFIG__FILESTORAGE: gridfs + COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse + COUNTLY_CONFIG__SHARED_CONNECTION: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' + COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" + COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' + COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" + + # --- API --- + api: + COUNTLY_CONTAINER: api + COUNTLY_CONFIG__API_PORT: "3001" + COUNTLY_CONFIG__API_HOST: "0.0.0.0" + COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" + COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB + COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms + + # --- Frontend --- + frontend: + COUNTLY_CONTAINER: frontend + COUNTLY_CONFIG__WEB_PORT: "6001" + COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" + 
COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" + COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms + + # --- Ingestor --- + ingestor: + COUNTLY_CONTAINER: ingestor + COUNTLY_CONFIG__INGESTOR_PORT: "3010" + COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" + + # --- Aggregator --- + aggregator: + COUNTLY_CONTAINER: aggregator + UV_THREADPOOL_SIZE: "6" + + # --- Job Server --- + jobserver: + COUNTLY_CONTAINER: jobserver + COUNTLY_CONFIG__JOBSERVER_PORT: "3020" + COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" + + # --- ClickHouse Connection --- + clickhouse: + COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" + COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms + COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" + COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: "true" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" + 
COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" + + # --- Kafka Connection --- + kafka: + COUNTLY_CONFIG__KAFKA_ENABLED: "true" + COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events + COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka + COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" + COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" + COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms + COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" + COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app + COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" + COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" + COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" + COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" + COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" + COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest + COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" + COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" + COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" + + # --- OpenTelemetry --- + otel: + OTEL_ENABLED: "false" + OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" + OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" + OTEL_TRACES_SAMPLER: "parentbased_traceidratio" + OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample + PYROSCOPE_ENABLED: "false" + +# --- Node.js Options (injected into configmap per component) --- +nodeOptions: + api: "--max-old-space-size=3072 --max-semi-space-size=256" + frontend: "--max-old-space-size=2048" + ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" + aggregator: "--max-old-space-size=3072 
--max-semi-space-size=128" + jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" + +# ============================================================================= +# Backing Service Modes +# ============================================================================= +# When mode=external, the corresponding chart is not deployed and connection +# details below are used instead. + +backingServices: + mongodb: + mode: bundled # bundled | external + host: "" + port: "27017" + connectionString: "" # If set, used as-is (bypasses host/port/user/pass) + username: "app" + password: "" + database: "admin" + replicaSet: "" + existingSecret: "" + # --- External MongoDB Atlas example --- + # mode: external + # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" + + clickhouse: + mode: bundled # bundled | external + host: "" + port: "8123" + tls: "false" + username: "default" + password: "" + database: "countly_drill" + existingSecret: "" + # --- External ClickHouse Cloud example --- + # mode: external + # host: "abc123.us-east-1.aws.clickhouse.cloud" + # port: "8443" + # tls: "true" + + kafka: + mode: bundled # bundled | external + brokers: "" # Comma-separated broker list + securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL + saslMechanism: "" + saslUsername: "" + saslPassword: "" + existingSecret: "" + # --- External Confluent Cloud example --- + # mode: external + # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" + # securityProtocol: "SASL_SSL" + # saslMechanism: "PLAIN" + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + mode: values # values | existingSecret | externalSecret + keep: true # Retain secrets on helm uninstall + rotationId: "" # Change to force secret re-creation + + common: + existingSecret: "" + encryptionReportsKey: "" # 
REQUIRED: min 8 chars + webSessionSecret: "" # REQUIRED: min 8 chars + passwordSecret: "" # REQUIRED: min 8 chars + + clickhouse: + existingSecret: "" + username: "" + password: "" + database: "" + + kafka: + existingSecret: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + + mongodb: + existingSecret: "" + key: "connectionString.standard" # Key within the secret to read + password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) + + # --- ExternalSecret configuration (used only when mode=externalSecret) --- + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: "" + webSessionSecret: "" + passwordSecret: "" + clickhouse: + url: "" + username: "" + password: "" + database: "" + kafka: + brokers: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + mongodb: + connectionString: "" + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + ingressNamespaceSelector: + kubernetes.io/metadata.name: ingress-nginx + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-custom-namespace + # ports: + # - port: 3001 + # protocol: TCP + +# ============================================================================= +# Ingress +# ============================================================================= +ingress: + enabled: true + className: nginx + annotations: + # F5 NGINX Ingress Controller (OSS) annotations + nginx.org/client-max-body-size: "50m" + nginx.org/proxy-buffering: "True" + nginx.org/proxy-buffer-size: "256k" + nginx.org/proxy-buffers: "16 256k" + nginx.org/proxy-busy-buffers-size: "512k" + nginx.org/proxy-max-temp-file-size: "2048m" + 
nginx.org/client-body-buffer-size: "2m" + nginx.org/proxy-connect-timeout: "60s" + nginx.org/proxy-read-timeout: "120s" + nginx.org/proxy-send-timeout: "120s" + nginx.org/keepalive: "256" + nginx.org/server-snippets: | + otel_trace on; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_set_header Connection ""; + proxy_set_header X-Request-ID $request_id; + proxy_set_header X-Request-Start $msec; + # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) + client_header_timeout 30s; + nginx.org/location-snippets: | + proxy_request_buffering on; + proxy_next_upstream error timeout http_502 http_503 http_504; + proxy_next_upstream_timeout 30s; + proxy_next_upstream_tries 3; + proxy_temp_file_write_size 1m; + client_body_timeout 120s; + hostname: countly.example.com + tls: + # TLS mode: letsencrypt | existingSecret | selfSigned | http + # http: No TLS + # letsencrypt: cert-manager + Let's Encrypt (recommended for production) + # existingSecret: Bring your own TLS secret + # selfSigned: cert-manager self-signed CA (for development) + mode: http + clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt + secretName: "" # Auto-derived if empty: -tls + selfSigned: + issuerName: "" # Auto-derived if empty: -ca-issuer + caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/argo2/external-secrets.example.yaml b/environments/argo2/external-secrets.example.yaml new file mode 100644 index 0000000..7bf93ef --- /dev/null +++ b/environments/argo2/external-secrets.example.yaml @@ -0,0 +1,36 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in 
environments//countly.yaml: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: my-secret-store +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "countly/encryption-reports-key" +# webSessionSecret: "countly/web-session-secret" +# passwordSecret: "countly/password-secret" +# clickhouse: +# url: "countly/clickhouse-url" +# username: "countly/clickhouse-username" +# password: "countly/clickhouse-password" +# database: "countly/clickhouse-database" +# kafka: +# brokers: "countly/kafka-brokers" +# securityProtocol: "countly/kafka-security-protocol" +# mongodb: +# connectionString: "countly/mongodb-connection-string" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. 
diff --git a/environments/argo2/global.yaml b/environments/argo2/global.yaml new file mode 100644 index 0000000..6996309 --- /dev/null +++ b/environments/argo2/global.yaml @@ -0,0 +1,26 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + storageClass: "" + imagePullSecrets: [] + +ingress: + hostname: argo2.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/argo2/kafka.yaml b/environments/argo2/kafka.yaml new file mode 100644 index 0000000..726decd --- /dev/null +++ b/environments/argo2/kafka.yaml @@ -0,0 +1,384 @@ +# ============================================================================= +# Kafka Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-kafka/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Strimzi Operator API Version --- +strimzi: + apiVersion: kafka.strimzi.io/v1 + +# --- Kafka Version --- +version: "4.2.0" + +# ============================================================================= +# Brokers +# ============================================================================= +brokers: + replicas: 3 + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "1" + memory: "4Gi" + jvmOptions: + xms: "2g" + xmx: "2g" + + # --- Persistence --- + persistence: + volumes: + - id: 0 + size: 100Gi + storageClass: "" + deleteClaim: false # Delete PVC when broker is removed + + # --- Broker Config --- + config: + default.replication.factor: 2 + min.insync.replicas: 2 + log.retention.hours: 168 # 7 days + log.segment.bytes: "1073741824" # 1 GiB + compression.type: lz4 + auto.create.topics.enable: false + offsets.topic.replication.factor: 2 + num.partitions: 24 + transaction.state.log.replication.factor: 2 + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Controllers (KRaft) +# ============================================================================= +controllers: + replicas: 3 + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + + persistence: + size: 20Gi + storageClass: "" + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + 
topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Listeners +# ============================================================================= +listeners: + - name: internal + port: 9092 + type: internal + tls: false + +# ============================================================================= +# Cruise Control +# ============================================================================= +cruiseControl: + enabled: true + resources: + requests: + cpu: "1" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + jvmOptions: + xms: "1g" + xmx: "2g" + autoRebalance: + - mode: add-brokers + - mode: remove-brokers + +# ============================================================================= +# Kafka Connect (ClickHouse Sink) +# ============================================================================= +kafkaConnect: + enabled: true + name: connect-ch + image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + replicas: 2 + bootstrapServers: "" # Auto-derived from cluster if empty + + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + jvmOptions: + xms: "5g" + xmx: "5g" + + # --- Worker Configuration --- + workerConfig: + group.id: connect-ch + config.storage.topic: connect_ch_configs + offset.storage.topic: connect_ch_offsets + status.storage.topic: connect_ch_status + config.storage.replication.factor: 2 + offset.storage.replication.factor: 2 + status.storage.replication.factor: 2 + offset.storage.partitions: 25 + status.storage.partitions: 5 + key.converter: org.apache.kafka.connect.storage.StringConverter + value.converter: org.apache.kafka.connect.json.JsonConverter + value.converter.schemas.enable: "false" + connector.client.config.override.policy: All + config.providers: env + config.providers.env.class: 
org.apache.kafka.common.config.provider.EnvVarConfigProvider + + # --- ClickHouse Connection (for the sink connector) --- + clickhouse: + existingSecret: "" + secretName: clickhouse-auth + host: "" # Auto-derived from clickhouseNamespace if empty + port: "8123" + ssl: "false" + database: "countly_drill" + username: "default" + password: "" # REQUIRED: must match ClickHouse default user password + + # --- Environment Variables (injected into Connect pods) --- + env: + EXACTLY_ONCE: "false" + ERRORS_RETRY_TIMEOUT: "300" + ERRORS_TOLERANCE: "none" # none | all + CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" + BYPASS_ROW_BINARY: "false" + TABLE_REFRESH_INTERVAL: "300" # seconds + KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + VALUE_CONVERTER_SCHEMAS_ENABLE: "false" + KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 32 MiB + KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" + KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" + KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB + KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB + KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" + KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" + KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" + KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" + + # --- HPA --- + hpa: + enabled: false + minReplicas: 1 + maxReplicas: 3 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: + scaleUp: + stabilizationWindowSeconds: 120 + policies: + - type: Percent + value: 50 + periodSeconds: 120 + - type: Pods + value: 2 + periodSeconds: 120 + 
selectPolicy: Min + scaleDown: + stabilizationWindowSeconds: 600 + policies: + - type: Percent + value: 25 + periodSeconds: 300 + - type: Pods + value: 1 + periodSeconds: 300 + selectPolicy: Min + + # --- OpenTelemetry Java Agent --- + # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. + # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer + # and outbound HTTP (ClickHouse sink) span creation. + otel: + enabled: false + serviceName: "kafka-connect" + exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" + exporterProtocol: "grpc" + sampler: "parentbased_traceidratio" + samplerArg: "1.0" + resourceAttributes: "" # e.g. "deployment.environment=production,k8s.cluster.name=my-cluster" + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- Connectors --- + connectors: + - name: ch-sink-drill-events + enabled: true + state: running # running | paused | stopped + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: "${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + 
consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + healthcheck.interval: "10000" + dlq: {} # Dead-letter queue config (empty = disabled) + +# ============================================================================= +# Metrics +# ============================================================================= +metrics: + enabled: true + +# --- Cross-Namespace Reference --- +clickhouseNamespace: clickhouse + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 9092 + # protocol: TCP + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain 
secrets on helm uninstall +# Kafka overrides for argo2 +# +# Migration is disabled for this customer, so keep the drill sink connector off. + +kafkaConnect: + connectors: + - name: ch-sink-drill-events + enabled: false + state: running + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: "${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + 
healthcheck.interval: "10000" + dlq: {} diff --git a/environments/argo2/migration.yaml b/environments/argo2/migration.yaml new file mode 100644 index 0000000..6fa760c --- /dev/null +++ b/environments/argo2/migration.yaml @@ -0,0 +1,3 @@ +# Migration overrides for optional countly-migration app. +# Enable per customer by setting `migration: enabled` in argocd/customers/.yaml +# and then filling this file with environment-specific overrides as needed. diff --git a/environments/argo2/mongodb.yaml b/environments/argo2/mongodb.yaml new file mode 100644 index 0000000..31f230e --- /dev/null +++ b/environments/argo2/mongodb.yaml @@ -0,0 +1,144 @@ +# ============================================================================= +# MongoDB Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-mongodb/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# ============================================================================= +# MongoDB Server +# ============================================================================= +mongodb: + version: "8.2.5" + members: 2 # Replica set member count + + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" # Overrides global.storageClass for MongoDB PVCs + size: 100Gi + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- TLS --- + tls: + enabled: false + +# ============================================================================= +# Users +# ============================================================================= +users: + # --- Application User --- + app: + name: app + database: admin + roles: + - name: readWriteAnyDatabase + db: admin + - name: dbAdmin + db: admin + passwordSecretName: app-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + + # --- Metrics Exporter User --- + metrics: + enabled: true + name: metrics + database: admin + roles: + - name: clusterMonitor + db: admin + - name: read + db: local + passwordSecretName: metrics-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + +# ============================================================================= +# Prometheus Exporter +# ============================================================================= 
+exporter: + enabled: true + image: percona/mongodb_exporter:0.40.0 + port: 9216 + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "200m" + memory: "256Mi" + service: + enabled: true + args: + - --collect-all + - --collector.diagnosticdata + - --collector.replicasetstatus + - --collector.dbstats + - --collector.topmetrics + - --collector.indexstats + - --collector.collstats + +# ============================================================================= +# Pod Disruption Budget +# ============================================================================= +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 27017 + # protocol: TCP + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git a/environments/argo2/observability.yaml b/environments/argo2/observability.yaml new file mode 100644 index 0000000..79a4b39 --- /dev/null +++ b/environments/argo2/observability.yaml @@ -0,0 +1,437 @@ +# ============================================================================= +# Observability Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-observability/values.yaml is listed here. 
+# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Deployment Mode --- +# full — All backends + Grafana in-cluster +# hybrid — All backends in-cluster, no Grafana (use external Grafana) +# external — Collectors only, forward to external endpoints +# disabled — Set in global.yaml observability.mode to skip this chart entirely +mode: full + +# --- Cluster Name (injected into Prometheus external_labels) --- +clusterName: countly-local + +# --- Cross-Namespace References --- +countlyNamespace: countly +clickhouseNamespace: clickhouse +mongodbNamespace: mongodb +kafkaNamespace: kafka +ingressNamespace: ingress-nginx +certManagerNamespace: cert-manager +clickhouseOperatorNamespace: clickhouse-operator-system + +# --- NGINX Ingress Controller Scrape Configuration --- +nginxIngress: + podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" + metricsPort: "9113" + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +# ============================================================================= +# Per-Signal Configuration +# ============================================================================= + +# --- Metrics --- +metrics: + enabled: true + sampling: + interval: "15s" # Global Prometheus scrape_interval + +# --- Traces --- +traces: + enabled: true + sampling: + strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased + ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased + tailSampling: # Only used when strategy == TailBased + waitDuration: "10s" + numTraces: 50000 + policies: + keepErrors: true + latencyThresholdMs: 2000 + baselineRatio: 0.1 + +# --- Logs --- +logs: + enabled: true + sampling: + enabled: false + dropRate: 
0 # 0.0-1.0, fraction of logs to drop + +# --- Profiling --- +profiling: + enabled: true + sampling: + rate: "100" # Advisory — used in NOTES.txt for SDK config + +# ============================================================================= +# Prometheus +# ============================================================================= +prometheus: + image: + repository: prom/prometheus + tag: "v3.10.0" + retention: + time: "30d" + size: "50GB" + storage: + size: 100Gi + storageClass: "" + resources: + requests: + cpu: "2" + memory: "3Gi" + limits: + cpu: "2" + memory: "4Gi" + extraArgs: [] + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. https://prom.corp.com/api/v1/write) + +# ============================================================================= +# Loki +# ============================================================================= +loki: + image: + repository: grafana/loki + tag: "3.6.7" + retention: "30d" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure + size: 100Gi + storageClass: "" + # Object storage settings (only used when backend != filesystem) + bucket: "" # Bucket/container name (REQUIRED for object backends) + endpoint: "" # Custom endpoint (e.g. 
MinIO: http://minio:9000) + region: "" # Cloud region (S3) + insecure: false # Use HTTP instead of HTTPS + forcePathStyle: false # S3 path-style access (required for MinIO) + # Credential file secret (for GCS JSON key files) + existingSecret: "" # K8s Secret name to mount + secretKey: "key.json" # Key within the Secret + secretMountPath: "/var/secrets/storage" + # Env-based credentials (for AWS access keys, Azure account keys) + envFromSecret: "" # K8s Secret name to inject as env vars + # Provider-specific passthrough (rendered directly into provider block) + config: {} + # + # --- Object storage examples (apply to loki, tempo, and pyroscope) --- + # + # --- AWS S3 example --- + # backend: s3 + # s3: + # bucket: my-loki-data + # region: us-east-1 + # endpoint: "" + # insecure: false + # forcePathStyle: false + # credentials: + # source: envFromSecret + # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY + # + # --- GCS example --- + # backend: gcs + # gcs: + # bucket: my-loki-data + # credentials: + # source: existingSecret + # existingSecret: loki-gcs-key + # secretKey: "key.json" + # + # --- MinIO example --- + # backend: s3 + # s3: + # bucket: loki + # endpoint: minio.storage.svc.cluster.local:9000 + # insecure: true + # forcePathStyle: true + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + config: + maxStreamsPerUser: 30000 + maxLineSize: 256000 + ingestionRateMb: 64 + ingestionBurstSizeMb: 128 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + pushUrl: "" # Full Loki push URL (e.g. 
https://loki.corp.com/loki/api/v1/push) + +# ============================================================================= +# Tempo +# ============================================================================= +tempo: + image: + repository: grafana/tempo + tag: "2.10.1" + retention: "12h" + storage: + backend: "local" # local | s3 | gcs | azure + size: 150Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "3" + memory: "6Gi" + limits: + cpu: "4" + memory: "10Gi" + config: + ingestionRateLimitBytes: 100000000 + ingestionBurstSizeBytes: 150000000 + maxTracesPerUser: 50000 + maxBytesPerTrace: 5000000 + maxRecvMsgSizeMiB: 16 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. tempo.corp.com:4317) + otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) + +# ============================================================================= +# Pyroscope +# ============================================================================= +pyroscope: + image: + repository: grafana/pyroscope + tag: "1.18.1" + retention: "72h" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure | swift + size: 20Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + ingestUrl: "" # Pyroscope ingest URL (e.g. 
https://pyroscope.corp.com) + +# ============================================================================= +# Grafana +# ============================================================================= +grafana: + enabled: true # Only deployed when mode == "full" + image: + repository: grafana/grafana + tag: "12.4.0" + admin: + existingSecret: "" # Use an existing Secret for admin credentials + userKey: "admin-user" + passwordKey: "admin-password" + persistence: + enabled: false # Ephemeral by default (declarative config, no state to lose) + size: 10Gi + storageClass: "" + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + plugins: + install: "grafana-pyroscope-datasource" + featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" + dashboards: + enabled: true + overview: true + platform: true + countly: true + data: true + edge: true + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + url: "" # External Grafana URL (for NOTES.txt only) + +# ============================================================================= +# Alloy (DaemonSet — log collection) +# ============================================================================= +alloy: + image: + repository: grafana/alloy + tag: "v1.13.2" + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + scheduling: + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + +# ============================================================================= +# Alloy-OTLP (Deployment — OTLP traces + profiling receive) +# ============================================================================= +alloyOtlp: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + memoryLimiter: + limit: "1600MiB" # Must be < resources.limits.memory + spikeLimit: 
"400MiB" + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# Alloy-Metrics (Deployment — ALL Prometheus scraping) +# ============================================================================= +alloyMetrics: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "512Mi" + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# kube-state-metrics +# ============================================================================= +kubeStateMetrics: + enabled: true + image: + repository: registry.k8s.io/kube-state-metrics/kube-state-metrics + tag: "v2.18.0" + resources: + requests: + cpu: "10m" + memory: "32Mi" + limits: + cpu: "100m" + memory: "256Mi" + namespaces: + - countly + - observability + - ingress-nginx + - kube-system + - clickhouse + - mongodb + - kafka + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# node-exporter +# ============================================================================= +nodeExporter: + enabled: true + image: + repository: prom/node-exporter + tag: "v1.10.2" + resources: + requests: + cpu: "100m" + memory: "180Mi" + limits: + cpu: "250m" + memory: "300Mi" + +# ============================================================================= +# Ingress (for Grafana) +# ============================================================================= +ingress: + enabled: false + className: nginx + annotations: {} + hosts: + - host: obs.example.com + tls: [] + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: 
+ enabled: false + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 4318 + # protocol: TCP diff --git a/environments/argo2/secrets-clickhouse.yaml b/environments/argo2/secrets-clickhouse.yaml new file mode 100644 index 0000000..9f97b3e --- /dev/null +++ b/environments/argo2/secrets-clickhouse.yaml @@ -0,0 +1,4 @@ +# ClickHouse direct secrets for argo2 +auth: + defaultUserPassword: + password: "argo2-clickhouse-2026" diff --git a/environments/argo2/secrets-countly.yaml b/environments/argo2/secrets-countly.yaml new file mode 100644 index 0000000..07a08be --- /dev/null +++ b/environments/argo2/secrets-countly.yaml @@ -0,0 +1,15 @@ +# Countly direct secrets for argo2 +secrets: + mode: values + common: + encryptionReportsKey: "argo2-reports-key-2026" + webSessionSecret: "argo2-web-session-2026" + passwordSecret: "argo2-password-secret-2026" + clickhouse: + username: "default" + password: "argo2-clickhouse-2026" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + mongodb: + password: "argo2-mongo-2026" diff --git a/environments/argo2/secrets-kafka.yaml b/environments/argo2/secrets-kafka.yaml new file mode 100644 index 0000000..f6e2e07 --- /dev/null +++ b/environments/argo2/secrets-kafka.yaml @@ -0,0 +1,4 @@ +# Kafka direct secrets for argo2 +kafkaConnect: + clickhouse: + password: "argo2-clickhouse-2026" diff --git a/environments/argo2/secrets-migration.yaml b/environments/argo2/secrets-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/argo2/secrets-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/argo2/secrets-mongodb.yaml b/environments/argo2/secrets-mongodb.yaml new file mode 100644 index 0000000..85a0dd4 --- /dev/null +++ b/environments/argo2/secrets-mongodb.yaml @@ -0,0 +1,7 @@ +# MongoDB direct secrets for argo2 +users: + app: + password: "argo2-mongo-2026" + metrics: + enabled: true + password: "argo2-metrics-2026" diff --git a/environments/argo2/secrets-observability.yaml b/environments/argo2/secrets-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/argo2/secrets-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/argo2/secrets.example.yaml b/environments/argo2/secrets.example.yaml new file mode 100644 index 0000000..282eb0d --- /dev/null +++ b/environments/argo2/secrets.example.yaml @@ -0,0 +1,42 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). 
+# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//secrets-countly.yaml) --- +secrets: + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +users: + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" diff --git a/environments/argo2/secrets.sops.example.yaml b/environments/argo2/secrets.sops.example.yaml new file mode 100644 index 0000000..9b652d1 --- /dev/null +++ b/environments/argo2/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//secrets-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//secrets-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as 
secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). +# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From d6ea447ad9946903a3b2006910c1a8d1bdc12a92 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 14:24:42 +0530 Subject: [PATCH 27/79] Remove Countly ingress defaults from customer envs --- argocd/README.md | 3 +++ environments/argo1/countly.yaml | 4 ++-- environments/argo2/countly.yaml | 4 ++-- environments/reference/countly.yaml | 4 ++-- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/argocd/README.md b/argocd/README.md index b8d971f..11cabc8 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -72,3 +72,6 @@ Customer metadata is the source of truth for: - `observability` - `kafkaConnect` - `migration` + +Do not set `ingress.hostname` or `ingress.tls.mode` in `environments//countly.yaml`. +Those are driven from the customer metadata file and passed explicitly by the Countly ApplicationSet. 
diff --git a/environments/argo1/countly.yaml b/environments/argo1/countly.yaml index 5c6ddfa..ccf54bb 100644 --- a/environments/argo1/countly.yaml +++ b/environments/argo1/countly.yaml @@ -555,14 +555,14 @@ ingress: proxy_next_upstream_tries 3; proxy_temp_file_write_size 1m; client_body_timeout 120s; - hostname: argo1.count.ly + hostname: "" # Set via argocd/customers/.yaml tls: # TLS mode: letsencrypt | existingSecret | selfSigned | http # http: No TLS # letsencrypt: cert-manager + Let's Encrypt (recommended for production) # existingSecret: Bring your own TLS secret # selfSigned: cert-manager self-signed CA (for development) - mode: letsencrypt + mode: "" # Set via argocd/customers/.yaml clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt secretName: "" # Auto-derived if empty: -tls selfSigned: diff --git a/environments/argo2/countly.yaml b/environments/argo2/countly.yaml index 4ba46fa..ccf54bb 100644 --- a/environments/argo2/countly.yaml +++ b/environments/argo2/countly.yaml @@ -555,14 +555,14 @@ ingress: proxy_next_upstream_tries 3; proxy_temp_file_write_size 1m; client_body_timeout 120s; - hostname: countly.example.com + hostname: "" # Set via argocd/customers/.yaml tls: # TLS mode: letsencrypt | existingSecret | selfSigned | http # http: No TLS # letsencrypt: cert-manager + Let's Encrypt (recommended for production) # existingSecret: Bring your own TLS secret # selfSigned: cert-manager self-signed CA (for development) - mode: http + mode: "" # Set via argocd/customers/.yaml clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt secretName: "" # Auto-derived if empty: -tls selfSigned: diff --git a/environments/reference/countly.yaml b/environments/reference/countly.yaml index 4ba46fa..ccf54bb 100644 --- a/environments/reference/countly.yaml +++ b/environments/reference/countly.yaml @@ -555,14 +555,14 @@ ingress: proxy_next_upstream_tries 3; proxy_temp_file_write_size 1m; client_body_timeout 120s; - hostname: countly.example.com + hostname: "" # 
Set via argocd/customers/.yaml tls: # TLS mode: letsencrypt | existingSecret | selfSigned | http # http: No TLS # letsencrypt: cert-manager + Let's Encrypt (recommended for production) # existingSecret: Bring your own TLS secret # selfSigned: cert-manager self-signed CA (for development) - mode: http + mode: "" # Set via argocd/customers/.yaml clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt secretName: "" # Auto-derived if empty: -tls selfSigned: From f847d6705a8ddb0a8fc0e89aec5d2c308c4f9205 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 15:15:38 +0530 Subject: [PATCH 28/79] Add argo3 test customer --- .gitignore | 1 + argocd/customers/argo3.yaml | 11 + environments/argo3/README.md | 69 +++ environments/argo3/clickhouse.yaml | 203 +++++++ environments/argo3/countly-tls.env | 7 + environments/argo3/countly.yaml | 570 ++++++++++++++++++ .../argo3/external-secrets.example.yaml | 36 ++ environments/argo3/global.yaml | 26 + environments/argo3/kafka.yaml | 384 ++++++++++++ environments/argo3/migration.yaml | 3 + environments/argo3/mongodb.yaml | 144 +++++ environments/argo3/observability.yaml | 437 ++++++++++++++ environments/argo3/secrets-clickhouse.yaml | 4 + environments/argo3/secrets-countly.yaml | 15 + environments/argo3/secrets-kafka.yaml | 4 + environments/argo3/secrets-migration.yaml | 2 + environments/argo3/secrets-mongodb.yaml | 7 + environments/argo3/secrets-observability.yaml | 2 + environments/argo3/secrets.example.yaml | 42 ++ environments/argo3/secrets.sops.example.yaml | 21 + 20 files changed, 1988 insertions(+) create mode 100644 argocd/customers/argo3.yaml create mode 100644 environments/argo3/README.md create mode 100644 environments/argo3/clickhouse.yaml create mode 100644 environments/argo3/countly-tls.env create mode 100644 environments/argo3/countly.yaml create mode 100644 environments/argo3/external-secrets.example.yaml create mode 100644 environments/argo3/global.yaml create mode 100644 environments/argo3/kafka.yaml 
create mode 100644 environments/argo3/migration.yaml create mode 100644 environments/argo3/mongodb.yaml create mode 100644 environments/argo3/observability.yaml create mode 100644 environments/argo3/secrets-clickhouse.yaml create mode 100644 environments/argo3/secrets-countly.yaml create mode 100644 environments/argo3/secrets-kafka.yaml create mode 100644 environments/argo3/secrets-migration.yaml create mode 100644 environments/argo3/secrets-mongodb.yaml create mode 100644 environments/argo3/secrets-observability.yaml create mode 100644 environments/argo3/secrets.example.yaml create mode 100644 environments/argo3/secrets.sops.example.yaml diff --git a/.gitignore b/.gitignore index 65d866f..3919e59 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ secrets-*.yaml !environments/reference/secrets-*.yaml !environments/argo1/secrets-*.yaml !environments/argo2/secrets-*.yaml +!environments/argo3/secrets-*.yaml # Helmfile state helmfile.lock diff --git a/argocd/customers/argo3.yaml b/argocd/customers/argo3.yaml new file mode 100644 index 0000000..557880b --- /dev/null +++ b/argocd/customers/argo3.yaml @@ -0,0 +1,11 @@ +customer: argo3 +environment: argo3 +project: countly-customers +server: https://34.58.215.60 +hostname: argo3.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/argo3/README.md b/environments/argo3/README.md new file mode 100644 index 0000000..12b374b --- /dev/null +++ b/environments/argo3/README.md @@ -0,0 +1,69 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + +3. Fill in required secrets in the chart-specific files: + - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `clickhouse.yaml` → `auth.defaultUserPassword.password` + - `kafka.yaml` → `kafkaConnect.clickhouse.password` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `secrets-mongodb.yaml` | MongoDB user passwords | +| `secrets-clickhouse.yaml` | ClickHouse auth password | +| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | +| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/argo3/clickhouse.yaml b/environments/argo3/clickhouse.yaml new file mode 100644 index 0000000..d7899d3 --- /dev/null +++ b/environments/argo3/clickhouse.yaml @@ -0,0 +1,203 @@ +# ============================================================================= +# ClickHouse Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-clickhouse/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Operator API Version --- +clickhouseOperator: + apiVersion: clickhouse.com/v1alpha1 + +# ============================================================================= +# Cluster Topology +# ============================================================================= +version: "26.2" +shards: 1 +replicas: 2 + +# ============================================================================= +# Images +# ============================================================================= +image: + server: clickhouse/clickhouse-server + keeper: clickhouse/clickhouse-keeper + +# ============================================================================= +# Database +# ============================================================================= +database: countly_drill + +# ============================================================================= +# Authentication +# ============================================================================= +auth: + # --- Default User Password --- + defaultUserPassword: + existingSecret: "" # Use an existing secret instead of creating one + secretName: clickhouse-default-password + key: password + password: "" # REQUIRED: ClickHouse default user password + + # --- Admin User (optional, separate from default) --- + adminUser: + enabled: false + # Precomputed SHA256 hex of the admin password (64 hex chars). 
+ # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 + passwordSha256Hex: "" + +# ============================================================================= +# OpenTelemetry Server-Side Tracing +# ============================================================================= +# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for +# queries arriving with W3C traceparent headers. +opentelemetry: + enabled: false + spanLog: + ttlDays: 7 + flushIntervalMs: 1000 + +# ============================================================================= +# Server +# ============================================================================= +server: + securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" + size: 50Gi + + settings: + maxConnections: 4096 + extraConfig: "" # Raw XML injected into server config + extraUsersConfig: "" # Raw XML injected into users config + prometheus: + enabled: true + port: 9363 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Keeper (ClickHouse Keeper for replication coordination) +# ============================================================================= +keeper: + replicas: 1 + + securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "250m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "1Gi" + + persistence: + storageClass: "" + size: 5Gi + + settings: + prometheus: + enabled: true + port: 9090 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + 
tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Pod Disruption Budgets +# ============================================================================= +podDisruptionBudget: + server: + enabled: false + maxUnavailable: 1 + keeper: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + - kafka + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-analytics-namespace + # ports: + # - port: 8123 + # protocol: TCP + +# ============================================================================= +# Service Monitor (Prometheus Operator CRD) +# ============================================================================= +serviceMonitor: + enabled: false + interval: "15s" + serviceType: headless # headless = per-pod scraping, clusterIP = any-pod + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git a/environments/argo3/countly-tls.env b/environments/argo3/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/argo3/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline 
at end of file diff --git a/environments/argo3/countly.yaml b/environments/argo3/countly.yaml new file mode 100644 index 0000000..ccf54bb --- /dev/null +++ b/environments/argo3/countly.yaml @@ -0,0 +1,570 @@ +# ============================================================================= +# Countly Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Service Account --- +serviceAccount: + create: true + name: "" # Auto-derived from release name if empty + annotations: {} + +# --- Image --- +image: + repository: gcr.io/countly-dev-313620/countly-unified + digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" + tag: "26.01" # Fallback when digest is empty + pullPolicy: IfNotPresent + +# --- Cross-Namespace References --- +clickhouseNamespace: clickhouse +kafkaNamespace: kafka +mongodbNamespace: mongodb + +# ============================================================================= +# Component: API +# ============================================================================= +api: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:api"] + port: 3001 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "1" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 1 + 
maxReplicas: 6 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 # 1-100, only used with type=preferred + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Frontend +# ============================================================================= +frontend: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:frontend"] + port: 6001 + healthCheck: + path: /ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "2.5Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + memory: {} + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Ingestor +# ============================================================================= +ingestor: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:ingestor"] + port: 3010 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 12 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 75 + behavior: {} + pdb: + 
enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Aggregator +# ============================================================================= +aggregator: + enabled: true + replicaCount: 4 + command: ["npm", "run", "start:aggregator"] + port: 0 # No HTTP port exposed + healthCheck: {} # No HTTP health check (no port) + terminationGracePeriodSeconds: 60 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "2" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 4 + maxReplicas: 8 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 65 + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 10 + periodSeconds: 60 + pdb: + enabled: true + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Job Server +# ============================================================================= +jobserver: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:jobserver"] + port: 3020 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + 
memory: + averageUtilization: 85 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: false + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Configuration (ConfigMaps) +# ============================================================================= +config: + # --- Common (shared by all components) --- + common: + NODE_ENV: production + COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" + COUNTLY_CONFIG__FILESTORAGE: gridfs + COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse + COUNTLY_CONFIG__SHARED_CONNECTION: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' + COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" + COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' + COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" + + # --- API --- + api: + COUNTLY_CONTAINER: api + COUNTLY_CONFIG__API_PORT: "3001" + COUNTLY_CONFIG__API_HOST: "0.0.0.0" + COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" + COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB + COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms + + # --- Frontend --- + frontend: + COUNTLY_CONTAINER: frontend + COUNTLY_CONFIG__WEB_PORT: "6001" + COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" + 
COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" + COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms + + # --- Ingestor --- + ingestor: + COUNTLY_CONTAINER: ingestor + COUNTLY_CONFIG__INGESTOR_PORT: "3010" + COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" + + # --- Aggregator --- + aggregator: + COUNTLY_CONTAINER: aggregator + UV_THREADPOOL_SIZE: "6" + + # --- Job Server --- + jobserver: + COUNTLY_CONTAINER: jobserver + COUNTLY_CONFIG__JOBSERVER_PORT: "3020" + COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" + + # --- ClickHouse Connection --- + clickhouse: + COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" + COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms + COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" + COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: "true" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" + 
COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" + + # --- Kafka Connection --- + kafka: + COUNTLY_CONFIG__KAFKA_ENABLED: "true" + COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events + COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka + COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" + COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" + COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms + COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" + COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app + COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" + COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" + COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" + COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" + COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" + COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest + COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" + COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" + COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" + + # --- OpenTelemetry --- + otel: + OTEL_ENABLED: "false" + OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" + OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" + OTEL_TRACES_SAMPLER: "parentbased_traceidratio" + OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample + PYROSCOPE_ENABLED: "false" + +# --- Node.js Options (injected into configmap per component) --- +nodeOptions: + api: "--max-old-space-size=3072 --max-semi-space-size=256" + frontend: "--max-old-space-size=2048" + ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" + aggregator: "--max-old-space-size=3072 
--max-semi-space-size=128" + jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" + +# ============================================================================= +# Backing Service Modes +# ============================================================================= +# When mode=external, the corresponding chart is not deployed and connection +# details below are used instead. + +backingServices: + mongodb: + mode: bundled # bundled | external + host: "" + port: "27017" + connectionString: "" # If set, used as-is (bypasses host/port/user/pass) + username: "app" + password: "" + database: "admin" + replicaSet: "" + existingSecret: "" + # --- External MongoDB Atlas example --- + # mode: external + # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" + + clickhouse: + mode: bundled # bundled | external + host: "" + port: "8123" + tls: "false" + username: "default" + password: "" + database: "countly_drill" + existingSecret: "" + # --- External ClickHouse Cloud example --- + # mode: external + # host: "abc123.us-east-1.aws.clickhouse.cloud" + # port: "8443" + # tls: "true" + + kafka: + mode: bundled # bundled | external + brokers: "" # Comma-separated broker list + securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL + saslMechanism: "" + saslUsername: "" + saslPassword: "" + existingSecret: "" + # --- External Confluent Cloud example --- + # mode: external + # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" + # securityProtocol: "SASL_SSL" + # saslMechanism: "PLAIN" + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + mode: values # values | existingSecret | externalSecret + keep: true # Retain secrets on helm uninstall + rotationId: "" # Change to force secret re-creation + + common: + existingSecret: "" + encryptionReportsKey: "" # 
REQUIRED: min 8 chars + webSessionSecret: "" # REQUIRED: min 8 chars + passwordSecret: "" # REQUIRED: min 8 chars + + clickhouse: + existingSecret: "" + username: "" + password: "" + database: "" + + kafka: + existingSecret: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + + mongodb: + existingSecret: "" + key: "connectionString.standard" # Key within the secret to read + password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) + + # --- ExternalSecret configuration (used only when mode=externalSecret) --- + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: "" + webSessionSecret: "" + passwordSecret: "" + clickhouse: + url: "" + username: "" + password: "" + database: "" + kafka: + brokers: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + mongodb: + connectionString: "" + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + ingressNamespaceSelector: + kubernetes.io/metadata.name: ingress-nginx + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-custom-namespace + # ports: + # - port: 3001 + # protocol: TCP + +# ============================================================================= +# Ingress +# ============================================================================= +ingress: + enabled: true + className: nginx + annotations: + # F5 NGINX Ingress Controller (OSS) annotations + nginx.org/client-max-body-size: "50m" + nginx.org/proxy-buffering: "True" + nginx.org/proxy-buffer-size: "256k" + nginx.org/proxy-buffers: "16 256k" + nginx.org/proxy-busy-buffers-size: "512k" + nginx.org/proxy-max-temp-file-size: "2048m" + 
nginx.org/client-body-buffer-size: "2m" + nginx.org/proxy-connect-timeout: "60s" + nginx.org/proxy-read-timeout: "120s" + nginx.org/proxy-send-timeout: "120s" + nginx.org/keepalive: "256" + nginx.org/server-snippets: | + otel_trace on; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_set_header Connection ""; + proxy_set_header X-Request-ID $request_id; + proxy_set_header X-Request-Start $msec; + # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) + client_header_timeout 30s; + nginx.org/location-snippets: | + proxy_request_buffering on; + proxy_next_upstream error timeout http_502 http_503 http_504; + proxy_next_upstream_timeout 30s; + proxy_next_upstream_tries 3; + proxy_temp_file_write_size 1m; + client_body_timeout 120s; + hostname: "" # Set via argocd/customers/<customer>.yaml + tls: + # TLS mode: letsencrypt | existingSecret | selfSigned | http + # http: No TLS + # letsencrypt: cert-manager + Let's Encrypt (recommended for production) + # existingSecret: Bring your own TLS secret + # selfSigned: cert-manager self-signed CA (for development) + mode: "" # Set via argocd/customers/<customer>.yaml + clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt + secretName: "" # Auto-derived if empty: -tls + selfSigned: + issuerName: "" # Auto-derived if empty: -ca-issuer + caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/argo3/external-secrets.example.yaml b/environments/argo3/external-secrets.example.yaml new file mode 100644 index 0000000..7bf93ef --- /dev/null +++ b/environments/argo3/external-secrets.example.yaml @@ -0,0 +1,36 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using 
secrets.mode=externalSecret, configure the ESO remoteRefs +# in environments/<customer>/countly.yaml: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: my-secret-store +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "countly/encryption-reports-key" +# webSessionSecret: "countly/web-session-secret" +# passwordSecret: "countly/password-secret" +# clickhouse: +# url: "countly/clickhouse-url" +# username: "countly/clickhouse-username" +# password: "countly/clickhouse-password" +# database: "countly/clickhouse-database" +# kafka: +# brokers: "countly/kafka-brokers" +# securityProtocol: "countly/kafka-security-protocol" +# mongodb: +# connectionString: "countly/mongodb-connection-string" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. 
diff --git a/environments/argo3/global.yaml b/environments/argo3/global.yaml new file mode 100644 index 0000000..f0d6f8a --- /dev/null +++ b/environments/argo3/global.yaml @@ -0,0 +1,26 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + storageClass: "" + imagePullSecrets: [] + +ingress: + hostname: argo3.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/argo3/kafka.yaml b/environments/argo3/kafka.yaml new file mode 100644 index 0000000..0a9b864 --- /dev/null +++ b/environments/argo3/kafka.yaml @@ -0,0 +1,384 @@ +# ============================================================================= +# Kafka Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-kafka/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Strimzi Operator API Version --- +strimzi: + apiVersion: kafka.strimzi.io/v1 + +# --- Kafka Version --- +version: "4.2.0" + +# ============================================================================= +# Brokers +# ============================================================================= +brokers: + replicas: 3 + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "1" + memory: "4Gi" + jvmOptions: + xms: "2g" + xmx: "2g" + + # --- Persistence --- + persistence: + volumes: + - id: 0 + size: 100Gi + storageClass: "" + deleteClaim: false # Delete PVC when broker is removed + + # --- Broker Config --- + config: + default.replication.factor: 2 + min.insync.replicas: 2 + log.retention.hours: 168 # 7 days + log.segment.bytes: "1073741824" # 1 GiB + compression.type: lz4 + auto.create.topics.enable: false + offsets.topic.replication.factor: 2 + num.partitions: 24 + transaction.state.log.replication.factor: 2 + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Controllers (KRaft) +# ============================================================================= +controllers: + replicas: 3 + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + + persistence: + size: 20Gi + storageClass: "" + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + 
topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Listeners +# ============================================================================= +listeners: + - name: internal + port: 9092 + type: internal + tls: false + +# ============================================================================= +# Cruise Control +# ============================================================================= +cruiseControl: + enabled: true + resources: + requests: + cpu: "1" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + jvmOptions: + xms: "1g" + xmx: "2g" + autoRebalance: + - mode: add-brokers + - mode: remove-brokers + +# ============================================================================= +# Kafka Connect (ClickHouse Sink) +# ============================================================================= +kafkaConnect: + enabled: true + name: connect-ch + image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + replicas: 2 + bootstrapServers: "" # Auto-derived from cluster if empty + + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + jvmOptions: + xms: "5g" + xmx: "5g" + + # --- Worker Configuration --- + workerConfig: + group.id: connect-ch + config.storage.topic: connect_ch_configs + offset.storage.topic: connect_ch_offsets + status.storage.topic: connect_ch_status + config.storage.replication.factor: 2 + offset.storage.replication.factor: 2 + status.storage.replication.factor: 2 + offset.storage.partitions: 25 + status.storage.partitions: 5 + key.converter: org.apache.kafka.connect.storage.StringConverter + value.converter: org.apache.kafka.connect.json.JsonConverter + value.converter.schemas.enable: "false" + connector.client.config.override.policy: All + config.providers: env + config.providers.env.class: 
org.apache.kafka.common.config.provider.EnvVarConfigProvider + + # --- ClickHouse Connection (for the sink connector) --- + clickhouse: + existingSecret: "" + secretName: clickhouse-auth + host: "" # Auto-derived from clickhouseNamespace if empty + port: "8123" + ssl: "false" + database: "countly_drill" + username: "default" + password: "" # REQUIRED: must match ClickHouse default user password + + # --- Environment Variables (injected into Connect pods) --- + env: + EXACTLY_ONCE: "false" + ERRORS_RETRY_TIMEOUT: "300" + ERRORS_TOLERANCE: "none" # none | all + CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" + BYPASS_ROW_BINARY: "false" + TABLE_REFRESH_INTERVAL: "300" # seconds + KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + VALUE_CONVERTER_SCHEMAS_ENABLE: "false" + KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 32 MiB + KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" + KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" + KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB + KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB + KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" + KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" + KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" + KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" + + # --- HPA --- + hpa: + enabled: false + minReplicas: 1 + maxReplicas: 3 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: + scaleUp: + stabilizationWindowSeconds: 120 + policies: + - type: Percent + value: 50 + periodSeconds: 120 + - type: Pods + value: 2 + periodSeconds: 120 + 
selectPolicy: Min + scaleDown: + stabilizationWindowSeconds: 600 + policies: + - type: Percent + value: 25 + periodSeconds: 300 + - type: Pods + value: 1 + periodSeconds: 300 + selectPolicy: Min + + # --- OpenTelemetry Java Agent --- + # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. + # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer + # and outbound HTTP (ClickHouse sink) span creation. + otel: + enabled: false + serviceName: "kafka-connect" + exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" + exporterProtocol: "grpc" + sampler: "parentbased_traceidratio" + samplerArg: "1.0" + resourceAttributes: "" # e.g. "deployment.environment=production,k8s.cluster.name=my-cluster" + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- Connectors --- + connectors: + - name: ch-sink-drill-events + enabled: true + state: running # running | paused | stopped + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: "${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + 
consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + healthcheck.interval: "10000" + dlq: {} # Dead-letter queue config (empty = disabled) + +# ============================================================================= +# Metrics +# ============================================================================= +metrics: + enabled: true + +# --- Cross-Namespace Reference --- +clickhouseNamespace: clickhouse + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 9092 + # protocol: TCP + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain 
secrets on helm uninstall +# Kafka overrides for argo3 +# +# Migration is disabled for this customer, so keep the drill sink connector off. + +kafkaConnect: + connectors: + - name: ch-sink-drill-events + enabled: false + state: running + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: "${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + 
healthcheck.interval: "10000" + dlq: {} diff --git a/environments/argo3/migration.yaml b/environments/argo3/migration.yaml new file mode 100644 index 0000000..6fa760c --- /dev/null +++ b/environments/argo3/migration.yaml @@ -0,0 +1,3 @@ +# Migration overrides for optional countly-migration app. +# Enable per customer by setting `migration: enabled` in argocd/customers/<customer>.yaml +# and then filling this file with environment-specific overrides as needed. diff --git a/environments/argo3/mongodb.yaml b/environments/argo3/mongodb.yaml new file mode 100644 index 0000000..31f230e --- /dev/null +++ b/environments/argo3/mongodb.yaml @@ -0,0 +1,144 @@ +# ============================================================================= +# MongoDB Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-mongodb/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# ============================================================================= +# MongoDB Server +# ============================================================================= +mongodb: + version: "8.2.5" + members: 2 # Replica set member count + + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" # Overrides global.storageClass for MongoDB PVCs + size: 100Gi + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- TLS --- + tls: + enabled: false + +# ============================================================================= +# Users +# ============================================================================= +users: + # --- Application User --- + app: + name: app + database: admin + roles: + - name: readWriteAnyDatabase + db: admin + - name: dbAdmin + db: admin + passwordSecretName: app-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + + # --- Metrics Exporter User --- + metrics: + enabled: true + name: metrics + database: admin + roles: + - name: clusterMonitor + db: admin + - name: read + db: local + passwordSecretName: metrics-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + +# ============================================================================= +# Prometheus Exporter +# ============================================================================= 
+exporter: + enabled: true + image: percona/mongodb_exporter:0.40.0 + port: 9216 + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "200m" + memory: "256Mi" + service: + enabled: true + args: + - --collect-all + - --collector.diagnosticdata + - --collector.replicasetstatus + - --collector.dbstats + - --collector.topmetrics + - --collector.indexstats + - --collector.collstats + +# ============================================================================= +# Pod Disruption Budget +# ============================================================================= +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 27017 + # protocol: TCP + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git a/environments/argo3/observability.yaml b/environments/argo3/observability.yaml new file mode 100644 index 0000000..79a4b39 --- /dev/null +++ b/environments/argo3/observability.yaml @@ -0,0 +1,437 @@ +# ============================================================================= +# Observability Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-observability/values.yaml is listed here. 
+# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Deployment Mode --- +# full — All backends + Grafana in-cluster +# hybrid — All backends in-cluster, no Grafana (use external Grafana) +# external — Collectors only, forward to external endpoints +# disabled — Set in global.yaml observability.mode to skip this chart entirely +mode: full + +# --- Cluster Name (injected into Prometheus external_labels) --- +clusterName: countly-local + +# --- Cross-Namespace References --- +countlyNamespace: countly +clickhouseNamespace: clickhouse +mongodbNamespace: mongodb +kafkaNamespace: kafka +ingressNamespace: ingress-nginx +certManagerNamespace: cert-manager +clickhouseOperatorNamespace: clickhouse-operator-system + +# --- NGINX Ingress Controller Scrape Configuration --- +nginxIngress: + podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" + metricsPort: "9113" + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +# ============================================================================= +# Per-Signal Configuration +# ============================================================================= + +# --- Metrics --- +metrics: + enabled: true + sampling: + interval: "15s" # Global Prometheus scrape_interval + +# --- Traces --- +traces: + enabled: true + sampling: + strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased + ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased + tailSampling: # Only used when strategy == TailBased + waitDuration: "10s" + numTraces: 50000 + policies: + keepErrors: true + latencyThresholdMs: 2000 + baselineRatio: 0.1 + +# --- Logs --- +logs: + enabled: true + sampling: + enabled: false + dropRate: 
0 # 0.0-1.0, fraction of logs to drop + +# --- Profiling --- +profiling: + enabled: true + sampling: + rate: "100" # Advisory — used in NOTES.txt for SDK config + +# ============================================================================= +# Prometheus +# ============================================================================= +prometheus: + image: + repository: prom/prometheus + tag: "v3.10.0" + retention: + time: "30d" + size: "50GB" + storage: + size: 100Gi + storageClass: "" + resources: + requests: + cpu: "2" + memory: "3Gi" + limits: + cpu: "2" + memory: "4Gi" + extraArgs: [] + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. https://prom.corp.com/api/v1/write) + +# ============================================================================= +# Loki +# ============================================================================= +loki: + image: + repository: grafana/loki + tag: "3.6.7" + retention: "30d" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure + size: 100Gi + storageClass: "" + # Object storage settings (only used when backend != filesystem) + bucket: "" # Bucket/container name (REQUIRED for object backends) + endpoint: "" # Custom endpoint (e.g. 
MinIO: http://minio:9000) + region: "" # Cloud region (S3) + insecure: false # Use HTTP instead of HTTPS + forcePathStyle: false # S3 path-style access (required for MinIO) + # Credential file secret (for GCS JSON key files) + existingSecret: "" # K8s Secret name to mount + secretKey: "key.json" # Key within the Secret + secretMountPath: "/var/secrets/storage" + # Env-based credentials (for AWS access keys, Azure account keys) + envFromSecret: "" # K8s Secret name to inject as env vars + # Provider-specific passthrough (rendered directly into provider block) + config: {} + # + # --- Object storage examples (apply to loki, tempo, and pyroscope) --- + # + # --- AWS S3 example --- + # backend: s3 + # s3: + # bucket: my-loki-data + # region: us-east-1 + # endpoint: "" + # insecure: false + # forcePathStyle: false + # credentials: + # source: envFromSecret + # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY + # + # --- GCS example --- + # backend: gcs + # gcs: + # bucket: my-loki-data + # credentials: + # source: existingSecret + # existingSecret: loki-gcs-key + # secretKey: "key.json" + # + # --- MinIO example --- + # backend: s3 + # s3: + # bucket: loki + # endpoint: minio.storage.svc.cluster.local:9000 + # insecure: true + # forcePathStyle: true + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + config: + maxStreamsPerUser: 30000 + maxLineSize: 256000 + ingestionRateMb: 64 + ingestionBurstSizeMb: 128 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + pushUrl: "" # Full Loki push URL (e.g. 
https://loki.corp.com/loki/api/v1/push) + +# ============================================================================= +# Tempo +# ============================================================================= +tempo: + image: + repository: grafana/tempo + tag: "2.10.1" + retention: "12h" + storage: + backend: "local" # local | s3 | gcs | azure + size: 150Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "3" + memory: "6Gi" + limits: + cpu: "4" + memory: "10Gi" + config: + ingestionRateLimitBytes: 100000000 + ingestionBurstSizeBytes: 150000000 + maxTracesPerUser: 50000 + maxBytesPerTrace: 5000000 + maxRecvMsgSizeMiB: 16 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. tempo.corp.com:4317) + otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) + +# ============================================================================= +# Pyroscope +# ============================================================================= +pyroscope: + image: + repository: grafana/pyroscope + tag: "1.18.1" + retention: "72h" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure | swift + size: 20Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + ingestUrl: "" # Pyroscope ingest URL (e.g. 
https://pyroscope.corp.com) + +# ============================================================================= +# Grafana +# ============================================================================= +grafana: + enabled: true # Only deployed when mode == "full" + image: + repository: grafana/grafana + tag: "12.4.0" + admin: + existingSecret: "" # Use an existing Secret for admin credentials + userKey: "admin-user" + passwordKey: "admin-password" + persistence: + enabled: false # Ephemeral by default (declarative config, no state to lose) + size: 10Gi + storageClass: "" + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + plugins: + install: "grafana-pyroscope-datasource" + featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" + dashboards: + enabled: true + overview: true + platform: true + countly: true + data: true + edge: true + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + url: "" # External Grafana URL (for NOTES.txt only) + +# ============================================================================= +# Alloy (DaemonSet — log collection) +# ============================================================================= +alloy: + image: + repository: grafana/alloy + tag: "v1.13.2" + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + scheduling: + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + +# ============================================================================= +# Alloy-OTLP (Deployment — OTLP traces + profiling receive) +# ============================================================================= +alloyOtlp: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + memoryLimiter: + limit: "1600MiB" # Must be < resources.limits.memory + spikeLimit: 
"400MiB" + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# Alloy-Metrics (Deployment — ALL Prometheus scraping) +# ============================================================================= +alloyMetrics: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "512Mi" + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# kube-state-metrics +# ============================================================================= +kubeStateMetrics: + enabled: true + image: + repository: registry.k8s.io/kube-state-metrics/kube-state-metrics + tag: "v2.18.0" + resources: + requests: + cpu: "10m" + memory: "32Mi" + limits: + cpu: "100m" + memory: "256Mi" + namespaces: + - countly + - observability + - ingress-nginx + - kube-system + - clickhouse + - mongodb + - kafka + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# node-exporter +# ============================================================================= +nodeExporter: + enabled: true + image: + repository: prom/node-exporter + tag: "v1.10.2" + resources: + requests: + cpu: "100m" + memory: "180Mi" + limits: + cpu: "250m" + memory: "300Mi" + +# ============================================================================= +# Ingress (for Grafana) +# ============================================================================= +ingress: + enabled: false + className: nginx + annotations: {} + hosts: + - host: obs.example.com + tls: [] + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: 
+ enabled: false + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 4318 + # protocol: TCP diff --git a/environments/argo3/secrets-clickhouse.yaml b/environments/argo3/secrets-clickhouse.yaml new file mode 100644 index 0000000..d74b27b --- /dev/null +++ b/environments/argo3/secrets-clickhouse.yaml @@ -0,0 +1,4 @@ +# ClickHouse direct secrets for argo3 +auth: + defaultUserPassword: + password: "argo3-clickhouse-2026" diff --git a/environments/argo3/secrets-countly.yaml b/environments/argo3/secrets-countly.yaml new file mode 100644 index 0000000..d4d41d5 --- /dev/null +++ b/environments/argo3/secrets-countly.yaml @@ -0,0 +1,15 @@ +# Countly direct secrets for argo3 +secrets: + mode: values + common: + encryptionReportsKey: "argo3-reports-key-2026" + webSessionSecret: "argo3-web-session-2026" + passwordSecret: "argo3-password-secret-2026" + clickhouse: + username: "default" + password: "argo3-clickhouse-2026" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + mongodb: + password: "argo3-mongo-2026" diff --git a/environments/argo3/secrets-kafka.yaml b/environments/argo3/secrets-kafka.yaml new file mode 100644 index 0000000..98980a7 --- /dev/null +++ b/environments/argo3/secrets-kafka.yaml @@ -0,0 +1,4 @@ +# Kafka direct secrets for argo3 +kafkaConnect: + clickhouse: + password: "argo3-clickhouse-2026" diff --git a/environments/argo3/secrets-migration.yaml b/environments/argo3/secrets-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/argo3/secrets-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/argo3/secrets-mongodb.yaml b/environments/argo3/secrets-mongodb.yaml new file mode 100644 index 0000000..2e5a2c6 --- /dev/null +++ b/environments/argo3/secrets-mongodb.yaml @@ -0,0 +1,7 @@ +# MongoDB direct secrets for argo3 +users: + app: + password: "argo3-mongo-2026" + metrics: + enabled: true + password: "argo3-metrics-2026" diff --git a/environments/argo3/secrets-observability.yaml b/environments/argo3/secrets-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/argo3/secrets-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/argo3/secrets.example.yaml b/environments/argo3/secrets.example.yaml new file mode 100644 index 0000000..282eb0d --- /dev/null +++ b/environments/argo3/secrets.example.yaml @@ -0,0 +1,42 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). 
+# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//secrets-countly.yaml) --- +secrets: + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +users: + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" diff --git a/environments/argo3/secrets.sops.example.yaml b/environments/argo3/secrets.sops.example.yaml new file mode 100644 index 0000000..9b652d1 --- /dev/null +++ b/environments/argo3/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//secrets-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//secrets-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as 
secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). +# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 511b179089ab88f3604ad5b4af52af15a40434a8 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 15:31:17 +0530 Subject: [PATCH 29/79] Remove test customers and direct secret fixtures --- .gitignore | 3 - argocd/customers/argo1.yaml | 11 - argocd/customers/argo2.yaml | 11 - argocd/customers/argo3.yaml | 11 - environments/argo1/README.md | 69 --- environments/argo1/clickhouse.yaml | 203 ------- environments/argo1/countly-tls.env | 7 - environments/argo1/countly.yaml | 570 ------------------ .../argo1/external-secrets.example.yaml | 36 -- environments/argo1/global.yaml | 26 - environments/argo1/kafka.yaml | 385 ------------ environments/argo1/migration.yaml | 3 - environments/argo1/mongodb.yaml | 144 ----- environments/argo1/observability.yaml | 437 -------------- environments/argo1/secrets-clickhouse.yaml | 4 - environments/argo1/secrets-countly.yaml | 15 - environments/argo1/secrets-kafka.yaml | 4 - environments/argo1/secrets-migration.yaml | 2 - environments/argo1/secrets-mongodb.yaml | 7 - environments/argo1/secrets-observability.yaml | 2 - environments/argo1/secrets.example.yaml | 42 -- environments/argo1/secrets.sops.example.yaml | 21 - environments/argo2/README.md | 69 --- environments/argo2/clickhouse.yaml | 203 ------- environments/argo2/countly-tls.env | 7 - environments/argo2/countly.yaml | 570 ------------------ .../argo2/external-secrets.example.yaml | 36 -- environments/argo2/global.yaml | 26 - environments/argo2/kafka.yaml | 384 ------------ environments/argo2/migration.yaml | 3 - environments/argo2/mongodb.yaml | 144 ----- environments/argo2/observability.yaml | 437 -------------- environments/argo2/secrets-clickhouse.yaml | 4 - environments/argo2/secrets-countly.yaml | 15 - 
environments/argo2/secrets-kafka.yaml | 4 - environments/argo2/secrets-migration.yaml | 2 - environments/argo2/secrets-mongodb.yaml | 7 - environments/argo2/secrets-observability.yaml | 2 - environments/argo2/secrets.example.yaml | 42 -- environments/argo2/secrets.sops.example.yaml | 21 - environments/argo3/README.md | 69 --- environments/argo3/clickhouse.yaml | 203 ------- environments/argo3/countly-tls.env | 7 - environments/argo3/countly.yaml | 570 ------------------ .../argo3/external-secrets.example.yaml | 36 -- environments/argo3/global.yaml | 26 - environments/argo3/kafka.yaml | 384 ------------ environments/argo3/migration.yaml | 3 - environments/argo3/mongodb.yaml | 144 ----- environments/argo3/observability.yaml | 437 -------------- environments/argo3/secrets-clickhouse.yaml | 4 - environments/argo3/secrets-countly.yaml | 15 - environments/argo3/secrets-kafka.yaml | 4 - environments/argo3/secrets-migration.yaml | 2 - environments/argo3/secrets-mongodb.yaml | 7 - environments/argo3/secrets-observability.yaml | 2 - environments/argo3/secrets.example.yaml | 42 -- environments/argo3/secrets.sops.example.yaml | 21 - environments/helm-argo-test/clickhouse.yaml | 5 - environments/helm-argo-test/countly.yaml | 7 - environments/helm-argo-test/global.yaml | 26 - environments/helm-argo-test/kafka.yaml | 52 -- environments/helm-argo-test/migration.yaml | 1 - environments/helm-argo-test/mongodb.yaml | 5 - .../helm-argo-test/observability.yaml | 2 - .../helm-argo-test/secrets-clickhouse.yaml | 6 - .../helm-argo-test/secrets-countly.yaml | 16 - .../helm-argo-test/secrets-kafka.yaml | 7 - .../helm-argo-test/secrets-migration.yaml | 1 - .../helm-argo-test/secrets-mongodb.yaml | 13 - .../helm-argo-test/secrets-observability.yaml | 1 - environments/helm-argocd/clickhouse.yaml | 5 - environments/helm-argocd/countly.yaml | 7 - environments/helm-argocd/global.yaml | 26 - environments/helm-argocd/kafka.yaml | 7 - environments/helm-argocd/mongodb.yaml | 5 - 
environments/helm-argocd/observability.yaml | 2 - .../helm-argocd/secrets-clickhouse.yaml | 6 - environments/helm-argocd/secrets-countly.yaml | 16 - environments/helm-argocd/secrets-kafka.yaml | 7 - environments/helm-argocd/secrets-mongodb.yaml | 13 - .../helm-argocd/secrets-observability.yaml | 1 - 82 files changed, 6202 deletions(-) delete mode 100644 argocd/customers/argo1.yaml delete mode 100644 argocd/customers/argo2.yaml delete mode 100644 argocd/customers/argo3.yaml delete mode 100644 environments/argo1/README.md delete mode 100644 environments/argo1/clickhouse.yaml delete mode 100644 environments/argo1/countly-tls.env delete mode 100644 environments/argo1/countly.yaml delete mode 100644 environments/argo1/external-secrets.example.yaml delete mode 100644 environments/argo1/global.yaml delete mode 100644 environments/argo1/kafka.yaml delete mode 100644 environments/argo1/migration.yaml delete mode 100644 environments/argo1/mongodb.yaml delete mode 100644 environments/argo1/observability.yaml delete mode 100644 environments/argo1/secrets-clickhouse.yaml delete mode 100644 environments/argo1/secrets-countly.yaml delete mode 100644 environments/argo1/secrets-kafka.yaml delete mode 100644 environments/argo1/secrets-migration.yaml delete mode 100644 environments/argo1/secrets-mongodb.yaml delete mode 100644 environments/argo1/secrets-observability.yaml delete mode 100644 environments/argo1/secrets.example.yaml delete mode 100644 environments/argo1/secrets.sops.example.yaml delete mode 100644 environments/argo2/README.md delete mode 100644 environments/argo2/clickhouse.yaml delete mode 100644 environments/argo2/countly-tls.env delete mode 100644 environments/argo2/countly.yaml delete mode 100644 environments/argo2/external-secrets.example.yaml delete mode 100644 environments/argo2/global.yaml delete mode 100644 environments/argo2/kafka.yaml delete mode 100644 environments/argo2/migration.yaml delete mode 100644 environments/argo2/mongodb.yaml delete mode 100644 
environments/argo2/observability.yaml delete mode 100644 environments/argo2/secrets-clickhouse.yaml delete mode 100644 environments/argo2/secrets-countly.yaml delete mode 100644 environments/argo2/secrets-kafka.yaml delete mode 100644 environments/argo2/secrets-migration.yaml delete mode 100644 environments/argo2/secrets-mongodb.yaml delete mode 100644 environments/argo2/secrets-observability.yaml delete mode 100644 environments/argo2/secrets.example.yaml delete mode 100644 environments/argo2/secrets.sops.example.yaml delete mode 100644 environments/argo3/README.md delete mode 100644 environments/argo3/clickhouse.yaml delete mode 100644 environments/argo3/countly-tls.env delete mode 100644 environments/argo3/countly.yaml delete mode 100644 environments/argo3/external-secrets.example.yaml delete mode 100644 environments/argo3/global.yaml delete mode 100644 environments/argo3/kafka.yaml delete mode 100644 environments/argo3/migration.yaml delete mode 100644 environments/argo3/mongodb.yaml delete mode 100644 environments/argo3/observability.yaml delete mode 100644 environments/argo3/secrets-clickhouse.yaml delete mode 100644 environments/argo3/secrets-countly.yaml delete mode 100644 environments/argo3/secrets-kafka.yaml delete mode 100644 environments/argo3/secrets-migration.yaml delete mode 100644 environments/argo3/secrets-mongodb.yaml delete mode 100644 environments/argo3/secrets-observability.yaml delete mode 100644 environments/argo3/secrets.example.yaml delete mode 100644 environments/argo3/secrets.sops.example.yaml delete mode 100644 environments/helm-argo-test/clickhouse.yaml delete mode 100644 environments/helm-argo-test/countly.yaml delete mode 100644 environments/helm-argo-test/global.yaml delete mode 100644 environments/helm-argo-test/kafka.yaml delete mode 100644 environments/helm-argo-test/migration.yaml delete mode 100644 environments/helm-argo-test/mongodb.yaml delete mode 100644 environments/helm-argo-test/observability.yaml delete mode 100644 
environments/helm-argo-test/secrets-clickhouse.yaml delete mode 100644 environments/helm-argo-test/secrets-countly.yaml delete mode 100644 environments/helm-argo-test/secrets-kafka.yaml delete mode 100644 environments/helm-argo-test/secrets-migration.yaml delete mode 100644 environments/helm-argo-test/secrets-mongodb.yaml delete mode 100644 environments/helm-argo-test/secrets-observability.yaml delete mode 100644 environments/helm-argocd/clickhouse.yaml delete mode 100644 environments/helm-argocd/countly.yaml delete mode 100644 environments/helm-argocd/global.yaml delete mode 100644 environments/helm-argocd/kafka.yaml delete mode 100644 environments/helm-argocd/mongodb.yaml delete mode 100644 environments/helm-argocd/observability.yaml delete mode 100644 environments/helm-argocd/secrets-clickhouse.yaml delete mode 100644 environments/helm-argocd/secrets-countly.yaml delete mode 100644 environments/helm-argocd/secrets-kafka.yaml delete mode 100644 environments/helm-argocd/secrets-mongodb.yaml delete mode 100644 environments/helm-argocd/secrets-observability.yaml diff --git a/.gitignore b/.gitignore index 3919e59..8809778 100644 --- a/.gitignore +++ b/.gitignore @@ -10,9 +10,6 @@ secrets-*.yaml # Exception: reference environment templates (contain no real secrets) !environments/reference/secrets-*.yaml -!environments/argo1/secrets-*.yaml -!environments/argo2/secrets-*.yaml -!environments/argo3/secrets-*.yaml # Helmfile state helmfile.lock diff --git a/argocd/customers/argo1.yaml b/argocd/customers/argo1.yaml deleted file mode 100644 index 2913516..0000000 --- a/argocd/customers/argo1.yaml +++ /dev/null @@ -1,11 +0,0 @@ -customer: argo1 -environment: argo1 -project: countly-customers -server: https://35.226.153.84 -hostname: argo1.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/argocd/customers/argo2.yaml b/argocd/customers/argo2.yaml deleted file mode 100644 index 
fa992c3..0000000 --- a/argocd/customers/argo2.yaml +++ /dev/null @@ -1,11 +0,0 @@ -customer: argo2 -environment: argo2 -project: countly-customers -server: https://34.60.146.65 -hostname: argo2.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/argocd/customers/argo3.yaml b/argocd/customers/argo3.yaml deleted file mode 100644 index 557880b..0000000 --- a/argocd/customers/argo3.yaml +++ /dev/null @@ -1,11 +0,0 @@ -customer: argo3 -environment: argo3 -project: countly-customers -server: https://34.58.215.60 -hostname: argo3.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/environments/argo1/README.md b/environments/argo1/README.md deleted file mode 100644 index 12b374b..0000000 --- a/environments/argo1/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - -3. 
Fill in required secrets in the chart-specific files: - - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `clickhouse.yaml` → `auth.defaultUserPassword.password` - - `kafka.yaml` → `kafkaConnect.clickhouse.password` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) -- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `secrets-mongodb.yaml` | MongoDB user passwords | -| `secrets-clickhouse.yaml` | ClickHouse auth password | -| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | -| `secrets-observability.yaml` | Observability secrets (external backend 
creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/argo1/clickhouse.yaml b/environments/argo1/clickhouse.yaml deleted file mode 100644 index d7899d3..0000000 --- a/environments/argo1/clickhouse.yaml +++ /dev/null @@ -1,203 +0,0 @@ -# ============================================================================= -# ClickHouse Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-clickhouse/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Operator API Version --- -clickhouseOperator: - apiVersion: clickhouse.com/v1alpha1 - -# ============================================================================= -# Cluster Topology -# ============================================================================= -version: "26.2" -shards: 1 -replicas: 2 - -# ============================================================================= -# Images -# ============================================================================= -image: - server: clickhouse/clickhouse-server - keeper: clickhouse/clickhouse-keeper - -# ============================================================================= -# Database -# ============================================================================= -database: countly_drill - -# 
============================================================================= -# Authentication -# ============================================================================= -auth: - # --- Default User Password --- - defaultUserPassword: - existingSecret: "" # Use an existing secret instead of creating one - secretName: clickhouse-default-password - key: password - password: "" # REQUIRED: ClickHouse default user password - - # --- Admin User (optional, separate from default) --- - adminUser: - enabled: false - # Precomputed SHA256 hex of the admin password (64 hex chars). - # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 - passwordSha256Hex: "" - -# ============================================================================= -# OpenTelemetry Server-Side Tracing -# ============================================================================= -# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for -# queries arriving with W3C traceparent headers. -opentelemetry: - enabled: false - spanLog: - ttlDays: 7 - flushIntervalMs: 1000 - -# ============================================================================= -# Server -# ============================================================================= -server: - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" - size: 50Gi - - settings: - maxConnections: 4096 - extraConfig: "" # Raw XML injected into server config - extraUsersConfig: "" # Raw XML injected into users config - prometheus: - enabled: true - port: 9363 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - -# 
============================================================================= -# Keeper (ClickHouse Keeper for replication coordination) -# ============================================================================= -keeper: - replicas: 1 - - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "250m" - memory: "512Mi" - limits: - cpu: "500m" - memory: "1Gi" - - persistence: - storageClass: "" - size: 5Gi - - settings: - prometheus: - enabled: true - port: 9090 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - -# ============================================================================= -# Pod Disruption Budgets -# ============================================================================= -podDisruptionBudget: - server: - enabled: false - maxUnavailable: 1 - keeper: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - - kafka - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-analytics-namespace - # ports: - # - port: 8123 - # protocol: TCP - -# ============================================================================= -# Service Monitor (Prometheus Operator CRD) -# ============================================================================= -serviceMonitor: - enabled: false - interval: "15s" - serviceType: headless # headless = per-pod scraping, clusterIP = any-pod - -# 
============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall diff --git a/environments/argo1/countly-tls.env b/environments/argo1/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/argo1/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/argo1/countly.yaml b/environments/argo1/countly.yaml deleted file mode 100644 index ccf54bb..0000000 --- a/environments/argo1/countly.yaml +++ /dev/null @@ -1,570 +0,0 @@ -# ============================================================================= -# Countly Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. 
-# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Service Account --- -serviceAccount: - create: true - name: "" # Auto-derived from release name if empty - annotations: {} - -# --- Image --- -image: - repository: gcr.io/countly-dev-313620/countly-unified - digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" - tag: "26.01" # Fallback when digest is empty - pullPolicy: IfNotPresent - -# --- Cross-Namespace References --- -clickhouseNamespace: clickhouse -kafkaNamespace: kafka -mongodbNamespace: mongodb - -# ============================================================================= -# Component: API -# ============================================================================= -api: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:api"] - port: 3001 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "1" - memory: "4Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 6 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 # 1-100, only used with type=preferred - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Frontend -# 
============================================================================= -frontend: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:frontend"] - port: 6001 - healthCheck: - path: /ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "2.5Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - averageUtilization: 80 - memory: {} - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Ingestor -# ============================================================================= -ingestor: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:ingestor"] - port: 3010 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 12 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 75 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Aggregator -# ============================================================================= -aggregator: - enabled: true - replicaCount: 4 - command: ["npm", "run", "start:aggregator"] - 
port: 0 # No HTTP port exposed - healthCheck: {} # No HTTP health check (no port) - terminationGracePeriodSeconds: 60 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "2" - memory: "4Gi" - hpa: - enabled: true - minReplicas: 4 - maxReplicas: 8 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 65 - behavior: - scaleUp: - stabilizationWindowSeconds: 0 - policies: - - type: Percent - value: 100 - periodSeconds: 15 - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Percent - value: 10 - periodSeconds: 60 - pdb: - enabled: true - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Job Server -# ============================================================================= -jobserver: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:jobserver"] - port: 3020 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - averageUtilization: 80 - memory: - averageUtilization: 85 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: false - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Configuration (ConfigMaps) -# ============================================================================= -config: - # --- Common (shared by all components) --- - common: - NODE_ENV: production - 
COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" - COUNTLY_CONFIG__FILESTORAGE: gridfs - COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse - COUNTLY_CONFIG__SHARED_CONNECTION: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' - COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" - COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' - COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" - - # --- API --- - api: - COUNTLY_CONTAINER: api - COUNTLY_CONFIG__API_PORT: "3001" - COUNTLY_CONFIG__API_HOST: "0.0.0.0" - COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" - COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB - COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms - - # --- Frontend --- - frontend: - COUNTLY_CONTAINER: frontend - COUNTLY_CONFIG__WEB_PORT: "6001" - COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" - COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" - COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms - - # --- Ingestor --- - ingestor: - COUNTLY_CONTAINER: ingestor - COUNTLY_CONFIG__INGESTOR_PORT: "3010" - COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" - - # --- Aggregator --- - aggregator: - COUNTLY_CONTAINER: aggregator - UV_THREADPOOL_SIZE: "6" - - # --- Job Server --- - jobserver: - COUNTLY_CONTAINER: jobserver - COUNTLY_CONFIG__JOBSERVER_PORT: "3020" - COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" - - # --- ClickHouse 
Connection --- - clickhouse: - COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" - COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms - COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" - COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: "true" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" - - # --- Kafka Connection --- - kafka: - COUNTLY_CONFIG__KAFKA_ENABLED: "true" - COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events - COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka - COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" - COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" - COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms - COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" - COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app 
- COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" - COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" - COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" - COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" - COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" - COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest - COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" - COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" - COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" - - # --- OpenTelemetry --- - otel: - OTEL_ENABLED: "false" - OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" - OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" - OTEL_TRACES_SAMPLER: "parentbased_traceidratio" - OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample - PYROSCOPE_ENABLED: "false" - -# --- Node.js Options (injected into configmap per component) --- -nodeOptions: - api: "--max-old-space-size=3072 --max-semi-space-size=256" - frontend: "--max-old-space-size=2048" - ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" - aggregator: "--max-old-space-size=3072 --max-semi-space-size=128" - jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" - -# ============================================================================= -# Backing Service Modes -# ============================================================================= -# When mode=external, the corresponding chart is not deployed and connection -# details below are used instead. 
- -backingServices: - mongodb: - mode: bundled # bundled | external - host: "" - port: "27017" - connectionString: "" # If set, used as-is (bypasses host/port/user/pass) - username: "app" - password: "" - database: "admin" - replicaSet: "" - existingSecret: "" - # --- External MongoDB Atlas example --- - # mode: external - # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" - - clickhouse: - mode: bundled # bundled | external - host: "" - port: "8123" - tls: "false" - username: "default" - password: "" - database: "countly_drill" - existingSecret: "" - # --- External ClickHouse Cloud example --- - # mode: external - # host: "abc123.us-east-1.aws.clickhouse.cloud" - # port: "8443" - # tls: "true" - - kafka: - mode: bundled # bundled | external - brokers: "" # Comma-separated broker list - securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL - saslMechanism: "" - saslUsername: "" - saslPassword: "" - existingSecret: "" - # --- External Confluent Cloud example --- - # mode: external - # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" - # securityProtocol: "SASL_SSL" - # saslMechanism: "PLAIN" - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - mode: values # values | existingSecret | externalSecret - keep: true # Retain secrets on helm uninstall - rotationId: "" # Change to force secret re-creation - - common: - existingSecret: "" - encryptionReportsKey: "" # REQUIRED: min 8 chars - webSessionSecret: "" # REQUIRED: min 8 chars - passwordSecret: "" # REQUIRED: min 8 chars - - clickhouse: - existingSecret: "" - username: "" - password: "" - database: "" - - kafka: - existingSecret: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - - mongodb: - existingSecret: "" - key: "connectionString.standard" # Key within the 
secret to read - password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) - - # --- ExternalSecret configuration (used only when mode=externalSecret) --- - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: "" - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: "" - webSessionSecret: "" - passwordSecret: "" - clickhouse: - url: "" - username: "" - password: "" - database: "" - kafka: - brokers: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - mongodb: - connectionString: "" - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - ingressNamespaceSelector: - kubernetes.io/metadata.name: ingress-nginx - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-custom-namespace - # ports: - # - port: 3001 - # protocol: TCP - -# ============================================================================= -# Ingress -# ============================================================================= -ingress: - enabled: true - className: nginx - annotations: - # F5 NGINX Ingress Controller (OSS) annotations - nginx.org/client-max-body-size: "50m" - nginx.org/proxy-buffering: "True" - nginx.org/proxy-buffer-size: "256k" - nginx.org/proxy-buffers: "16 256k" - nginx.org/proxy-busy-buffers-size: "512k" - nginx.org/proxy-max-temp-file-size: "2048m" - nginx.org/client-body-buffer-size: "2m" - nginx.org/proxy-connect-timeout: "60s" - nginx.org/proxy-read-timeout: "120s" - nginx.org/proxy-send-timeout: "120s" - nginx.org/keepalive: "256" - nginx.org/server-snippets: | - otel_trace on; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header 
X-Scheme $scheme; - proxy_set_header Connection ""; - proxy_set_header X-Request-ID $request_id; - proxy_set_header X-Request-Start $msec; - # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) - client_header_timeout 30s; - nginx.org/location-snippets: | - proxy_request_buffering on; - proxy_next_upstream error timeout http_502 http_503 http_504; - proxy_next_upstream_timeout 30s; - proxy_next_upstream_tries 3; - proxy_temp_file_write_size 1m; - client_body_timeout 120s; - hostname: "" # Set via argocd/customers/.yaml - tls: - # TLS mode: letsencrypt | existingSecret | selfSigned | http - # http: No TLS - # letsencrypt: cert-manager + Let's Encrypt (recommended for production) - # existingSecret: Bring your own TLS secret - # selfSigned: cert-manager self-signed CA (for development) - mode: "" # Set via argocd/customers/.yaml - clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt - secretName: "" # Auto-derived if empty: -tls - selfSigned: - issuerName: "" # Auto-derived if empty: -ca-issuer - caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/argo1/external-secrets.example.yaml b/environments/argo1/external-secrets.example.yaml deleted file mode 100644 index 7bf93ef..0000000 --- a/environments/argo1/external-secrets.example.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in environments//countly.yaml: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: my-secret-store -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "countly/encryption-reports-key" -# webSessionSecret: "countly/web-session-secret" -# passwordSecret: 
"countly/password-secret" -# clickhouse: -# url: "countly/clickhouse-url" -# username: "countly/clickhouse-username" -# password: "countly/clickhouse-password" -# database: "countly/clickhouse-database" -# kafka: -# brokers: "countly/kafka-brokers" -# securityProtocol: "countly/kafka-security-protocol" -# mongodb: -# connectionString: "countly/mongodb-connection-string" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. diff --git a/environments/argo1/global.yaml b/environments/argo1/global.yaml deleted file mode 100644 index c3bf726..0000000 --- a/environments/argo1/global.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - storageClass: "" - imagePullSecrets: [] - -ingress: - hostname: argo1.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/argo1/kafka.yaml b/environments/argo1/kafka.yaml deleted file mode 100644 index f4edaab..0000000 --- a/environments/argo1/kafka.yaml +++ /dev/null @@ -1,385 +0,0 @@ -# ============================================================================= -# Kafka Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-kafka/values.yaml is listed here. 
-# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Strimzi Operator API Version --- -strimzi: - apiVersion: kafka.strimzi.io/v1 - -# --- Kafka Version --- -version: "4.2.0" - -# ============================================================================= -# Brokers -# ============================================================================= -brokers: - replicas: 3 - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "1" - memory: "4Gi" - jvmOptions: - xms: "2g" - xmx: "2g" - - # --- Persistence --- - persistence: - volumes: - - id: 0 - size: 100Gi - storageClass: "" - deleteClaim: false # Delete PVC when broker is removed - - # --- Broker Config --- - config: - default.replication.factor: 2 - min.insync.replicas: 2 - log.retention.hours: 168 # 7 days - log.segment.bytes: "1073741824" # 1 GiB - compression.type: lz4 - auto.create.topics.enable: false - offsets.topic.replication.factor: 2 - num.partitions: 24 - transaction.state.log.replication.factor: 2 - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Controllers (KRaft) -# ============================================================================= -controllers: - replicas: 3 - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - - persistence: - size: 20Gi - storageClass: "" - - # --- Scheduling --- - 
scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Listeners -# ============================================================================= -listeners: - - name: internal - port: 9092 - type: internal - tls: false - -# ============================================================================= -# Cruise Control -# ============================================================================= -cruiseControl: - enabled: true - resources: - requests: - cpu: "1" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - jvmOptions: - xms: "1g" - xmx: "2g" - autoRebalance: - - mode: add-brokers - - mode: remove-brokers - -# ============================================================================= -# Kafka Connect (ClickHouse Sink) -# ============================================================================= -kafkaConnect: - enabled: true - name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" - replicas: 2 - bootstrapServers: "" # Auto-derived from cluster if empty - - resources: - requests: - cpu: "2" - memory: "8Gi" - limits: - cpu: "2" - memory: "8Gi" - jvmOptions: - xms: "5g" - xmx: "5g" - - # --- Worker Configuration --- - workerConfig: - group.id: connect-ch - config.storage.topic: connect_ch_configs - offset.storage.topic: connect_ch_offsets - status.storage.topic: connect_ch_status - config.storage.replication.factor: 2 - offset.storage.replication.factor: 2 - status.storage.replication.factor: 2 - offset.storage.partitions: 25 - status.storage.partitions: 5 - key.converter: org.apache.kafka.connect.storage.StringConverter - value.converter: org.apache.kafka.connect.json.JsonConverter - value.converter.schemas.enable: "false" - connector.client.config.override.policy: All - 
config.providers: env - config.providers.env.class: org.apache.kafka.common.config.provider.EnvVarConfigProvider - - # --- ClickHouse Connection (for the sink connector) --- - clickhouse: - existingSecret: "" - secretName: clickhouse-auth - host: "" # Auto-derived from clickhouseNamespace if empty - port: "8123" - ssl: "false" - database: "countly_drill" - username: "default" - password: "" # REQUIRED: must match ClickHouse default user password - - # --- Environment Variables (injected into Connect pods) --- - env: - EXACTLY_ONCE: "false" - ERRORS_RETRY_TIMEOUT: "300" - ERRORS_TOLERANCE: "none" # none | all - CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" - BYPASS_ROW_BINARY: "false" - TABLE_REFRESH_INTERVAL: "300" # seconds - KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter - VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter - VALUE_CONVERTER_SCHEMAS_ENABLE: "false" - KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 32 MiB - KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" - KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" - KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB - KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB - KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" - KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" - KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" - KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" - - # --- HPA --- - hpa: - enabled: false - minReplicas: 1 - maxReplicas: 3 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: - scaleUp: - stabilizationWindowSeconds: 120 - policies: - - type: Percent - value: 50 - periodSeconds: 120 - - 
type: Pods - value: 2 - periodSeconds: 120 - selectPolicy: Min - scaleDown: - stabilizationWindowSeconds: 600 - policies: - - type: Percent - value: 25 - periodSeconds: 300 - - type: Pods - value: 1 - periodSeconds: 300 - selectPolicy: Min - - # --- OpenTelemetry Java Agent --- - # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. - # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer - # and outbound HTTP (ClickHouse sink) span creation. - otel: - enabled: false - serviceName: "kafka-connect" - exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" - exporterProtocol: "grpc" - sampler: "parentbased_traceidratio" - samplerArg: "1.0" - resourceAttributes: "" # e.g. "deployment.environment=production,k8s.cluster.name=my-cluster" - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- Connectors --- - connectors: - - name: ch-sink-drill-events - enabled: true - state: running # running | paused | stopped - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: 
"${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - healthcheck.interval: "10000" - dlq: {} # Dead-letter queue config (empty = disabled) - -# ============================================================================= -# Metrics -# ============================================================================= -metrics: - enabled: true - -# --- Cross-Namespace Reference --- -clickhouseNamespace: clickhouse - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 9092 - # protocol: TCP - -# ============================================================================= -# Secrets -# 
============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall -# Kafka overrides for argo1 -# -# Migration is disabled for this customer, so the drill ClickHouse sink -# connector must stay off to avoid waiting on migration-owned tables. - -kafkaConnect: - connectors: - - name: ch-sink-drill-events - enabled: false - state: running - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - consumer.override.heartbeat.interval.ms: 
"${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - healthcheck.interval: "10000" - dlq: {} diff --git a/environments/argo1/migration.yaml b/environments/argo1/migration.yaml deleted file mode 100644 index 6fa760c..0000000 --- a/environments/argo1/migration.yaml +++ /dev/null @@ -1,3 +0,0 @@ -# Migration overrides for optional countly-migration app. -# Enable per customer by setting `migration: enabled` in argocd/customers/.yaml -# and then filling this file with environment-specific overrides as needed. diff --git a/environments/argo1/mongodb.yaml b/environments/argo1/mongodb.yaml deleted file mode 100644 index 31f230e..0000000 --- a/environments/argo1/mongodb.yaml +++ /dev/null @@ -1,144 +0,0 @@ -# ============================================================================= -# MongoDB Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-mongodb/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. 
-# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# ============================================================================= -# MongoDB Server -# ============================================================================= -mongodb: - version: "8.2.5" - members: 2 # Replica set member count - - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" # Overrides global.storageClass for MongoDB PVCs - size: 100Gi - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- TLS --- - tls: - enabled: false - -# ============================================================================= -# Users -# ============================================================================= -users: - # --- Application User --- - app: - name: app - database: admin - roles: - - name: readWriteAnyDatabase - db: admin - - name: dbAdmin - db: admin - passwordSecretName: app-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - - # --- Metrics Exporter User --- - metrics: - enabled: true - name: metrics - database: admin - roles: - - name: clusterMonitor - db: admin - - name: read - db: local - passwordSecretName: metrics-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - -# ============================================================================= -# Prometheus Exporter -# ============================================================================= 
-exporter: - enabled: true - image: percona/mongodb_exporter:0.40.0 - port: 9216 - resources: - requests: - cpu: "50m" - memory: "64Mi" - limits: - cpu: "200m" - memory: "256Mi" - service: - enabled: true - args: - - --collect-all - - --collector.diagnosticdata - - --collector.replicasetstatus - - --collector.dbstats - - --collector.topmetrics - - --collector.indexstats - - --collector.collstats - -# ============================================================================= -# Pod Disruption Budget -# ============================================================================= -podDisruptionBudget: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 27017 - # protocol: TCP - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall diff --git a/environments/argo1/observability.yaml b/environments/argo1/observability.yaml deleted file mode 100644 index 79a4b39..0000000 --- a/environments/argo1/observability.yaml +++ /dev/null @@ -1,437 +0,0 @@ -# ============================================================================= -# Observability Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-observability/values.yaml is listed here. 
-# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Deployment Mode --- -# full — All backends + Grafana in-cluster -# hybrid — All backends in-cluster, no Grafana (use external Grafana) -# external — Collectors only, forward to external endpoints -# disabled — Set in global.yaml observability.mode to skip this chart entirely -mode: full - -# --- Cluster Name (injected into Prometheus external_labels) --- -clusterName: countly-local - -# --- Cross-Namespace References --- -countlyNamespace: countly -clickhouseNamespace: clickhouse -mongodbNamespace: mongodb -kafkaNamespace: kafka -ingressNamespace: ingress-nginx -certManagerNamespace: cert-manager -clickhouseOperatorNamespace: clickhouse-operator-system - -# --- NGINX Ingress Controller Scrape Configuration --- -nginxIngress: - podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" - metricsPort: "9113" - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -# ============================================================================= -# Per-Signal Configuration -# ============================================================================= - -# --- Metrics --- -metrics: - enabled: true - sampling: - interval: "15s" # Global Prometheus scrape_interval - -# --- Traces --- -traces: - enabled: true - sampling: - strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased - ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased - tailSampling: # Only used when strategy == TailBased - waitDuration: "10s" - numTraces: 50000 - policies: - keepErrors: true - latencyThresholdMs: 2000 - baselineRatio: 0.1 - -# --- Logs --- -logs: - enabled: true - sampling: - enabled: false - dropRate: 
0 # 0.0-1.0, fraction of logs to drop - -# --- Profiling --- -profiling: - enabled: true - sampling: - rate: "100" # Advisory — used in NOTES.txt for SDK config - -# ============================================================================= -# Prometheus -# ============================================================================= -prometheus: - image: - repository: prom/prometheus - tag: "v3.10.0" - retention: - time: "30d" - size: "50GB" - storage: - size: 100Gi - storageClass: "" - resources: - requests: - cpu: "2" - memory: "3Gi" - limits: - cpu: "2" - memory: "4Gi" - extraArgs: [] - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. https://prom.corp.com/api/v1/write) - -# ============================================================================= -# Loki -# ============================================================================= -loki: - image: - repository: grafana/loki - tag: "3.6.7" - retention: "30d" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure - size: 100Gi - storageClass: "" - # Object storage settings (only used when backend != filesystem) - bucket: "" # Bucket/container name (REQUIRED for object backends) - endpoint: "" # Custom endpoint (e.g. 
MinIO: http://minio:9000) - region: "" # Cloud region (S3) - insecure: false # Use HTTP instead of HTTPS - forcePathStyle: false # S3 path-style access (required for MinIO) - # Credential file secret (for GCS JSON key files) - existingSecret: "" # K8s Secret name to mount - secretKey: "key.json" # Key within the Secret - secretMountPath: "/var/secrets/storage" - # Env-based credentials (for AWS access keys, Azure account keys) - envFromSecret: "" # K8s Secret name to inject as env vars - # Provider-specific passthrough (rendered directly into provider block) - config: {} - # - # --- Object storage examples (apply to loki, tempo, and pyroscope) --- - # - # --- AWS S3 example --- - # backend: s3 - # s3: - # bucket: my-loki-data - # region: us-east-1 - # endpoint: "" - # insecure: false - # forcePathStyle: false - # credentials: - # source: envFromSecret - # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY - # - # --- GCS example --- - # backend: gcs - # gcs: - # bucket: my-loki-data - # credentials: - # source: existingSecret - # existingSecret: loki-gcs-key - # secretKey: "key.json" - # - # --- MinIO example --- - # backend: s3 - # s3: - # bucket: loki - # endpoint: minio.storage.svc.cluster.local:9000 - # insecure: true - # forcePathStyle: true - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - config: - maxStreamsPerUser: 30000 - maxLineSize: 256000 - ingestionRateMb: 64 - ingestionBurstSizeMb: 128 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - pushUrl: "" # Full Loki push URL (e.g. 
https://loki.corp.com/loki/api/v1/push) - -# ============================================================================= -# Tempo -# ============================================================================= -tempo: - image: - repository: grafana/tempo - tag: "2.10.1" - retention: "12h" - storage: - backend: "local" # local | s3 | gcs | azure - size: 150Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "3" - memory: "6Gi" - limits: - cpu: "4" - memory: "10Gi" - config: - ingestionRateLimitBytes: 100000000 - ingestionBurstSizeBytes: 150000000 - maxTracesPerUser: 50000 - maxBytesPerTrace: 5000000 - maxRecvMsgSizeMiB: 16 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. tempo.corp.com:4317) - otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) - -# ============================================================================= -# Pyroscope -# ============================================================================= -pyroscope: - image: - repository: grafana/pyroscope - tag: "1.18.1" - retention: "72h" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure | swift - size: 20Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - ingestUrl: "" # Pyroscope ingest URL (e.g. 
https://pyroscope.corp.com) - -# ============================================================================= -# Grafana -# ============================================================================= -grafana: - enabled: true # Only deployed when mode == "full" - image: - repository: grafana/grafana - tag: "12.4.0" - admin: - existingSecret: "" # Use an existing Secret for admin credentials - userKey: "admin-user" - passwordKey: "admin-password" - persistence: - enabled: false # Ephemeral by default (declarative config, no state to lose) - size: 10Gi - storageClass: "" - resources: - requests: - cpu: "1" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - plugins: - install: "grafana-pyroscope-datasource" - featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" - dashboards: - enabled: true - overview: true - platform: true - countly: true - data: true - edge: true - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - url: "" # External Grafana URL (for NOTES.txt only) - -# ============================================================================= -# Alloy (DaemonSet — log collection) -# ============================================================================= -alloy: - image: - repository: grafana/alloy - tag: "v1.13.2" - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - scheduling: - nodeSelector: - kubernetes.io/os: linux - tolerations: [] - -# ============================================================================= -# Alloy-OTLP (Deployment — OTLP traces + profiling receive) -# ============================================================================= -alloyOtlp: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - memoryLimiter: - limit: "1600MiB" # Must be < resources.limits.memory - spikeLimit: 
"400MiB" - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# Alloy-Metrics (Deployment — ALL Prometheus scraping) -# ============================================================================= -alloyMetrics: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "512Mi" - limits: - cpu: "500m" - memory: "512Mi" - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# kube-state-metrics -# ============================================================================= -kubeStateMetrics: - enabled: true - image: - repository: registry.k8s.io/kube-state-metrics/kube-state-metrics - tag: "v2.18.0" - resources: - requests: - cpu: "10m" - memory: "32Mi" - limits: - cpu: "100m" - memory: "256Mi" - namespaces: - - countly - - observability - - ingress-nginx - - kube-system - - clickhouse - - mongodb - - kafka - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# node-exporter -# ============================================================================= -nodeExporter: - enabled: true - image: - repository: prom/node-exporter - tag: "v1.10.2" - resources: - requests: - cpu: "100m" - memory: "180Mi" - limits: - cpu: "250m" - memory: "300Mi" - -# ============================================================================= -# Ingress (for Grafana) -# ============================================================================= -ingress: - enabled: false - className: nginx - annotations: {} - hosts: - - host: obs.example.com - tls: [] - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: 
- enabled: false - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 4318 - # protocol: TCP diff --git a/environments/argo1/secrets-clickhouse.yaml b/environments/argo1/secrets-clickhouse.yaml deleted file mode 100644 index 653806f..0000000 --- a/environments/argo1/secrets-clickhouse.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# ClickHouse direct secrets for argo1 -auth: - defaultUserPassword: - password: "argo1-clickhouse-2026" diff --git a/environments/argo1/secrets-countly.yaml b/environments/argo1/secrets-countly.yaml deleted file mode 100644 index d971003..0000000 --- a/environments/argo1/secrets-countly.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Countly direct secrets for argo1 -secrets: - mode: values - common: - encryptionReportsKey: "argo1-reports-key-2026" - webSessionSecret: "argo1-web-session-2026" - passwordSecret: "argo1-password-secret-2026" - clickhouse: - username: "default" - password: "argo1-clickhouse-2026" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - mongodb: - password: "argo1-mongo-2026" diff --git a/environments/argo1/secrets-kafka.yaml b/environments/argo1/secrets-kafka.yaml deleted file mode 100644 index 3afbe21..0000000 --- a/environments/argo1/secrets-kafka.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Kafka direct secrets for argo1 -kafkaConnect: - clickhouse: - password: "argo1-clickhouse-2026" diff --git a/environments/argo1/secrets-migration.yaml b/environments/argo1/secrets-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/argo1/secrets-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/argo1/secrets-mongodb.yaml b/environments/argo1/secrets-mongodb.yaml deleted file mode 100644 index d0e245d..0000000 --- a/environments/argo1/secrets-mongodb.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# MongoDB direct secrets for argo1 -users: - app: - password: "argo1-mongo-2026" - metrics: - enabled: true - password: "argo1-metrics-2026" diff --git a/environments/argo1/secrets-observability.yaml b/environments/argo1/secrets-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/argo1/secrets-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/argo1/secrets.example.yaml b/environments/argo1/secrets.example.yaml deleted file mode 100644 index 282eb0d..0000000 --- a/environments/argo1/secrets.example.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). 
-# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//secrets-countly.yaml) --- -secrets: - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- -users: - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//secrets-kafka.yaml) --- -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" diff --git a/environments/argo1/secrets.sops.example.yaml b/environments/argo1/secrets.sops.example.yaml deleted file mode 100644 index 9b652d1..0000000 --- a/environments/argo1/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//secrets-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//secrets-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as 
secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/environments/argo2/README.md b/environments/argo2/README.md deleted file mode 100644 index 12b374b..0000000 --- a/environments/argo2/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - -3. Fill in required secrets in the chart-specific files: - - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `clickhouse.yaml` → `auth.defaultUserPassword.password` - - `kafka.yaml` → `kafkaConnect.clickhouse.password` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. 
- -For production, choose one of: -- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) -- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `secrets-mongodb.yaml` | MongoDB user passwords | -| `secrets-clickhouse.yaml` | ClickHouse auth password | -| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | -| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/argo2/clickhouse.yaml b/environments/argo2/clickhouse.yaml deleted file mode 100644 index d7899d3..0000000 --- a/environments/argo2/clickhouse.yaml +++ /dev/null @@ -1,203 +0,0 @@ -# ============================================================================= -# ClickHouse Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-clickhouse/values.yaml 
is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Operator API Version --- -clickhouseOperator: - apiVersion: clickhouse.com/v1alpha1 - -# ============================================================================= -# Cluster Topology -# ============================================================================= -version: "26.2" -shards: 1 -replicas: 2 - -# ============================================================================= -# Images -# ============================================================================= -image: - server: clickhouse/clickhouse-server - keeper: clickhouse/clickhouse-keeper - -# ============================================================================= -# Database -# ============================================================================= -database: countly_drill - -# ============================================================================= -# Authentication -# ============================================================================= -auth: - # --- Default User Password --- - defaultUserPassword: - existingSecret: "" # Use an existing secret instead of creating one - secretName: clickhouse-default-password - key: password - password: "" # REQUIRED: ClickHouse default user password - - # --- Admin User (optional, separate from default) --- - adminUser: - enabled: false - # Precomputed SHA256 hex of the admin password (64 hex chars). 
- # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 - passwordSha256Hex: "" - -# ============================================================================= -# OpenTelemetry Server-Side Tracing -# ============================================================================= -# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for -# queries arriving with W3C traceparent headers. -opentelemetry: - enabled: false - spanLog: - ttlDays: 7 - flushIntervalMs: 1000 - -# ============================================================================= -# Server -# ============================================================================= -server: - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" - size: 50Gi - - settings: - maxConnections: 4096 - extraConfig: "" # Raw XML injected into server config - extraUsersConfig: "" # Raw XML injected into users config - prometheus: - enabled: true - port: 9363 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - -# ============================================================================= -# Keeper (ClickHouse Keeper for replication coordination) -# ============================================================================= -keeper: - replicas: 1 - - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "250m" - memory: "512Mi" - limits: - cpu: "500m" - memory: "1Gi" - - persistence: - storageClass: "" - size: 5Gi - - settings: - prometheus: - enabled: true - port: 9090 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - 
tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - -# ============================================================================= -# Pod Disruption Budgets -# ============================================================================= -podDisruptionBudget: - server: - enabled: false - maxUnavailable: 1 - keeper: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - - kafka - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-analytics-namespace - # ports: - # - port: 8123 - # protocol: TCP - -# ============================================================================= -# Service Monitor (Prometheus Operator CRD) -# ============================================================================= -serviceMonitor: - enabled: false - interval: "15s" - serviceType: headless # headless = per-pod scraping, clusterIP = any-pod - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall diff --git a/environments/argo2/countly-tls.env b/environments/argo2/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/argo2/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No 
newline at end of file diff --git a/environments/argo2/countly.yaml b/environments/argo2/countly.yaml deleted file mode 100644 index ccf54bb..0000000 --- a/environments/argo2/countly.yaml +++ /dev/null @@ -1,570 +0,0 @@ -# ============================================================================= -# Countly Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Service Account --- -serviceAccount: - create: true - name: "" # Auto-derived from release name if empty - annotations: {} - -# --- Image --- -image: - repository: gcr.io/countly-dev-313620/countly-unified - digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" - tag: "26.01" # Fallback when digest is empty - pullPolicy: IfNotPresent - -# --- Cross-Namespace References --- -clickhouseNamespace: clickhouse -kafkaNamespace: kafka -mongodbNamespace: mongodb - -# ============================================================================= -# Component: API -# ============================================================================= -api: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:api"] - port: 3001 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "1" - memory: "4Gi" - hpa: - enabled: true - 
minReplicas: 1 - maxReplicas: 6 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 # 1-100, only used with type=preferred - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Frontend -# ============================================================================= -frontend: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:frontend"] - port: 6001 - healthCheck: - path: /ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "2.5Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - averageUtilization: 80 - memory: {} - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Ingestor -# ============================================================================= -ingestor: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:ingestor"] - port: 3010 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 12 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 75 - 
behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Aggregator -# ============================================================================= -aggregator: - enabled: true - replicaCount: 4 - command: ["npm", "run", "start:aggregator"] - port: 0 # No HTTP port exposed - healthCheck: {} # No HTTP health check (no port) - terminationGracePeriodSeconds: 60 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "2" - memory: "4Gi" - hpa: - enabled: true - minReplicas: 4 - maxReplicas: 8 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 65 - behavior: - scaleUp: - stabilizationWindowSeconds: 0 - policies: - - type: Percent - value: 100 - periodSeconds: 15 - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Percent - value: 10 - periodSeconds: 60 - pdb: - enabled: true - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Job Server -# ============================================================================= -jobserver: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:jobserver"] - port: 3020 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - 
averageUtilization: 80 - memory: - averageUtilization: 85 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: false - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Configuration (ConfigMaps) -# ============================================================================= -config: - # --- Common (shared by all components) --- - common: - NODE_ENV: production - COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" - COUNTLY_CONFIG__FILESTORAGE: gridfs - COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse - COUNTLY_CONFIG__SHARED_CONNECTION: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' - COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" - COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' - COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" - - # --- API --- - api: - COUNTLY_CONTAINER: api - COUNTLY_CONFIG__API_PORT: "3001" - COUNTLY_CONFIG__API_HOST: "0.0.0.0" - COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" - COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB - COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms - - # --- Frontend --- - frontend: - COUNTLY_CONTAINER: frontend - COUNTLY_CONFIG__WEB_PORT: "6001" - 
COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" - COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" - COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms - - # --- Ingestor --- - ingestor: - COUNTLY_CONTAINER: ingestor - COUNTLY_CONFIG__INGESTOR_PORT: "3010" - COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" - - # --- Aggregator --- - aggregator: - COUNTLY_CONTAINER: aggregator - UV_THREADPOOL_SIZE: "6" - - # --- Job Server --- - jobserver: - COUNTLY_CONTAINER: jobserver - COUNTLY_CONFIG__JOBSERVER_PORT: "3020" - COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" - - # --- ClickHouse Connection --- - clickhouse: - COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" - COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms - COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" - COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: "true" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" - 
COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" - - # --- Kafka Connection --- - kafka: - COUNTLY_CONFIG__KAFKA_ENABLED: "true" - COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events - COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka - COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" - COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" - COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms - COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" - COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app - COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" - COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" - COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" - COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" - COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" - COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest - COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" - COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" - COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" - - # --- OpenTelemetry --- - otel: - OTEL_ENABLED: "false" - OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" - OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" - OTEL_TRACES_SAMPLER: "parentbased_traceidratio" - OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample - PYROSCOPE_ENABLED: "false" - -# --- Node.js Options (injected into configmap per component) --- -nodeOptions: - api: "--max-old-space-size=3072 --max-semi-space-size=256" - frontend: "--max-old-space-size=2048" - ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" - 
aggregator: "--max-old-space-size=3072 --max-semi-space-size=128" - jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" - -# ============================================================================= -# Backing Service Modes -# ============================================================================= -# When mode=external, the corresponding chart is not deployed and connection -# details below are used instead. - -backingServices: - mongodb: - mode: bundled # bundled | external - host: "" - port: "27017" - connectionString: "" # If set, used as-is (bypasses host/port/user/pass) - username: "app" - password: "" - database: "admin" - replicaSet: "" - existingSecret: "" - # --- External MongoDB Atlas example --- - # mode: external - # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" - - clickhouse: - mode: bundled # bundled | external - host: "" - port: "8123" - tls: "false" - username: "default" - password: "" - database: "countly_drill" - existingSecret: "" - # --- External ClickHouse Cloud example --- - # mode: external - # host: "abc123.us-east-1.aws.clickhouse.cloud" - # port: "8443" - # tls: "true" - - kafka: - mode: bundled # bundled | external - brokers: "" # Comma-separated broker list - securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL - saslMechanism: "" - saslUsername: "" - saslPassword: "" - existingSecret: "" - # --- External Confluent Cloud example --- - # mode: external - # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" - # securityProtocol: "SASL_SSL" - # saslMechanism: "PLAIN" - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - mode: values # values | existingSecret | externalSecret - keep: true # Retain secrets on helm uninstall - rotationId: "" # Change to force secret re-creation - - common: - 
existingSecret: "" - encryptionReportsKey: "" # REQUIRED: min 8 chars - webSessionSecret: "" # REQUIRED: min 8 chars - passwordSecret: "" # REQUIRED: min 8 chars - - clickhouse: - existingSecret: "" - username: "" - password: "" - database: "" - - kafka: - existingSecret: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - - mongodb: - existingSecret: "" - key: "connectionString.standard" # Key within the secret to read - password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) - - # --- ExternalSecret configuration (used only when mode=externalSecret) --- - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: "" - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: "" - webSessionSecret: "" - passwordSecret: "" - clickhouse: - url: "" - username: "" - password: "" - database: "" - kafka: - brokers: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - mongodb: - connectionString: "" - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - ingressNamespaceSelector: - kubernetes.io/metadata.name: ingress-nginx - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-custom-namespace - # ports: - # - port: 3001 - # protocol: TCP - -# ============================================================================= -# Ingress -# ============================================================================= -ingress: - enabled: true - className: nginx - annotations: - # F5 NGINX Ingress Controller (OSS) annotations - nginx.org/client-max-body-size: "50m" - nginx.org/proxy-buffering: "True" - nginx.org/proxy-buffer-size: "256k" - nginx.org/proxy-buffers: "16 256k" - nginx.org/proxy-busy-buffers-size: "512k" 
- nginx.org/proxy-max-temp-file-size: "2048m" - nginx.org/client-body-buffer-size: "2m" - nginx.org/proxy-connect-timeout: "60s" - nginx.org/proxy-read-timeout: "120s" - nginx.org/proxy-send-timeout: "120s" - nginx.org/keepalive: "256" - nginx.org/server-snippets: | - otel_trace on; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Scheme $scheme; - proxy_set_header Connection ""; - proxy_set_header X-Request-ID $request_id; - proxy_set_header X-Request-Start $msec; - # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) - client_header_timeout 30s; - nginx.org/location-snippets: | - proxy_request_buffering on; - proxy_next_upstream error timeout http_502 http_503 http_504; - proxy_next_upstream_timeout 30s; - proxy_next_upstream_tries 3; - proxy_temp_file_write_size 1m; - client_body_timeout 120s; - hostname: "" # Set via argocd/customers/.yaml - tls: - # TLS mode: letsencrypt | existingSecret | selfSigned | http - # http: No TLS - # letsencrypt: cert-manager + Let's Encrypt (recommended for production) - # existingSecret: Bring your own TLS secret - # selfSigned: cert-manager self-signed CA (for development) - mode: "" # Set via argocd/customers/.yaml - clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt - secretName: "" # Auto-derived if empty: -tls - selfSigned: - issuerName: "" # Auto-derived if empty: -ca-issuer - caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/argo2/external-secrets.example.yaml b/environments/argo2/external-secrets.example.yaml deleted file mode 100644 index 7bf93ef..0000000 --- a/environments/argo2/external-secrets.example.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# 
============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in environments//countly.yaml: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: my-secret-store -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "countly/encryption-reports-key" -# webSessionSecret: "countly/web-session-secret" -# passwordSecret: "countly/password-secret" -# clickhouse: -# url: "countly/clickhouse-url" -# username: "countly/clickhouse-username" -# password: "countly/clickhouse-password" -# database: "countly/clickhouse-database" -# kafka: -# brokers: "countly/kafka-brokers" -# securityProtocol: "countly/kafka-security-protocol" -# mongodb: -# connectionString: "countly/mongodb-connection-string" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. 
diff --git a/environments/argo2/global.yaml b/environments/argo2/global.yaml deleted file mode 100644 index 6996309..0000000 --- a/environments/argo2/global.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - storageClass: "" - imagePullSecrets: [] - -ingress: - hostname: argo2.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/argo2/kafka.yaml b/environments/argo2/kafka.yaml deleted file mode 100644 index 726decd..0000000 --- a/environments/argo2/kafka.yaml +++ /dev/null @@ -1,384 +0,0 @@ -# ============================================================================= -# Kafka Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-kafka/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. 
-# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Strimzi Operator API Version --- -strimzi: - apiVersion: kafka.strimzi.io/v1 - -# --- Kafka Version --- -version: "4.2.0" - -# ============================================================================= -# Brokers -# ============================================================================= -brokers: - replicas: 3 - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "1" - memory: "4Gi" - jvmOptions: - xms: "2g" - xmx: "2g" - - # --- Persistence --- - persistence: - volumes: - - id: 0 - size: 100Gi - storageClass: "" - deleteClaim: false # Delete PVC when broker is removed - - # --- Broker Config --- - config: - default.replication.factor: 2 - min.insync.replicas: 2 - log.retention.hours: 168 # 7 days - log.segment.bytes: "1073741824" # 1 GiB - compression.type: lz4 - auto.create.topics.enable: false - offsets.topic.replication.factor: 2 - num.partitions: 24 - transaction.state.log.replication.factor: 2 - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Controllers (KRaft) -# ============================================================================= -controllers: - replicas: 3 - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - - persistence: - size: 20Gi - storageClass: "" - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - 
topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Listeners -# ============================================================================= -listeners: - - name: internal - port: 9092 - type: internal - tls: false - -# ============================================================================= -# Cruise Control -# ============================================================================= -cruiseControl: - enabled: true - resources: - requests: - cpu: "1" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - jvmOptions: - xms: "1g" - xmx: "2g" - autoRebalance: - - mode: add-brokers - - mode: remove-brokers - -# ============================================================================= -# Kafka Connect (ClickHouse Sink) -# ============================================================================= -kafkaConnect: - enabled: true - name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" - replicas: 2 - bootstrapServers: "" # Auto-derived from cluster if empty - - resources: - requests: - cpu: "2" - memory: "8Gi" - limits: - cpu: "2" - memory: "8Gi" - jvmOptions: - xms: "5g" - xmx: "5g" - - # --- Worker Configuration --- - workerConfig: - group.id: connect-ch - config.storage.topic: connect_ch_configs - offset.storage.topic: connect_ch_offsets - status.storage.topic: connect_ch_status - config.storage.replication.factor: 2 - offset.storage.replication.factor: 2 - status.storage.replication.factor: 2 - offset.storage.partitions: 25 - status.storage.partitions: 5 - key.converter: org.apache.kafka.connect.storage.StringConverter - value.converter: org.apache.kafka.connect.json.JsonConverter - value.converter.schemas.enable: "false" - connector.client.config.override.policy: All - config.providers: env - config.providers.env.class: 
org.apache.kafka.common.config.provider.EnvVarConfigProvider - - # --- ClickHouse Connection (for the sink connector) --- - clickhouse: - existingSecret: "" - secretName: clickhouse-auth - host: "" # Auto-derived from clickhouseNamespace if empty - port: "8123" - ssl: "false" - database: "countly_drill" - username: "default" - password: "" # REQUIRED: must match ClickHouse default user password - - # --- Environment Variables (injected into Connect pods) --- - env: - EXACTLY_ONCE: "false" - ERRORS_RETRY_TIMEOUT: "300" - ERRORS_TOLERANCE: "none" # none | all - CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" - BYPASS_ROW_BINARY: "false" - TABLE_REFRESH_INTERVAL: "300" # seconds - KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter - VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter - VALUE_CONVERTER_SCHEMAS_ENABLE: "false" - KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 32 MiB - KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" - KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" - KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB - KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB - KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" - KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" - KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" - KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" - - # --- HPA --- - hpa: - enabled: false - minReplicas: 1 - maxReplicas: 3 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: - scaleUp: - stabilizationWindowSeconds: 120 - policies: - - type: Percent - value: 50 - periodSeconds: 120 - - type: Pods - value: 2 - periodSeconds: 120 - 
selectPolicy: Min - scaleDown: - stabilizationWindowSeconds: 600 - policies: - - type: Percent - value: 25 - periodSeconds: 300 - - type: Pods - value: 1 - periodSeconds: 300 - selectPolicy: Min - - # --- OpenTelemetry Java Agent --- - # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. - # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer - # and outbound HTTP (ClickHouse sink) span creation. - otel: - enabled: false - serviceName: "kafka-connect" - exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" - exporterProtocol: "grpc" - sampler: "parentbased_traceidratio" - samplerArg: "1.0" - resourceAttributes: "" # e.g. "deployment.environment=production,k8s.cluster.name=my-cluster" - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- Connectors --- - connectors: - - name: ch-sink-drill-events - enabled: true - state: running # running | paused | stopped - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - 
consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - healthcheck.interval: "10000" - dlq: {} # Dead-letter queue config (empty = disabled) - -# ============================================================================= -# Metrics -# ============================================================================= -metrics: - enabled: true - -# --- Cross-Namespace Reference --- -clickhouseNamespace: clickhouse - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 9092 - # protocol: TCP - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain 
secrets on helm uninstall -# Kafka overrides for argo2 -# -# Migration is disabled for this customer, so keep the drill sink connector off. - -kafkaConnect: - connectors: - - name: ch-sink-drill-events - enabled: false - state: running - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - 
healthcheck.interval: "10000" - dlq: {} diff --git a/environments/argo2/migration.yaml b/environments/argo2/migration.yaml deleted file mode 100644 index 6fa760c..0000000 --- a/environments/argo2/migration.yaml +++ /dev/null @@ -1,3 +0,0 @@ -# Migration overrides for optional countly-migration app. -# Enable per customer by setting `migration: enabled` in argocd/customers/.yaml -# and then filling this file with environment-specific overrides as needed. diff --git a/environments/argo2/mongodb.yaml b/environments/argo2/mongodb.yaml deleted file mode 100644 index 31f230e..0000000 --- a/environments/argo2/mongodb.yaml +++ /dev/null @@ -1,144 +0,0 @@ -# ============================================================================= -# MongoDB Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-mongodb/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. 
-# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# ============================================================================= -# MongoDB Server -# ============================================================================= -mongodb: - version: "8.2.5" - members: 2 # Replica set member count - - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" # Overrides global.storageClass for MongoDB PVCs - size: 100Gi - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- TLS --- - tls: - enabled: false - -# ============================================================================= -# Users -# ============================================================================= -users: - # --- Application User --- - app: - name: app - database: admin - roles: - - name: readWriteAnyDatabase - db: admin - - name: dbAdmin - db: admin - passwordSecretName: app-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - - # --- Metrics Exporter User --- - metrics: - enabled: true - name: metrics - database: admin - roles: - - name: clusterMonitor - db: admin - - name: read - db: local - passwordSecretName: metrics-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - -# ============================================================================= -# Prometheus Exporter -# ============================================================================= 
-exporter: - enabled: true - image: percona/mongodb_exporter:0.40.0 - port: 9216 - resources: - requests: - cpu: "50m" - memory: "64Mi" - limits: - cpu: "200m" - memory: "256Mi" - service: - enabled: true - args: - - --collect-all - - --collector.diagnosticdata - - --collector.replicasetstatus - - --collector.dbstats - - --collector.topmetrics - - --collector.indexstats - - --collector.collstats - -# ============================================================================= -# Pod Disruption Budget -# ============================================================================= -podDisruptionBudget: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 27017 - # protocol: TCP - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall diff --git a/environments/argo2/observability.yaml b/environments/argo2/observability.yaml deleted file mode 100644 index 79a4b39..0000000 --- a/environments/argo2/observability.yaml +++ /dev/null @@ -1,437 +0,0 @@ -# ============================================================================= -# Observability Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-observability/values.yaml is listed here. 
-# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Deployment Mode --- -# full — All backends + Grafana in-cluster -# hybrid — All backends in-cluster, no Grafana (use external Grafana) -# external — Collectors only, forward to external endpoints -# disabled — Set in global.yaml observability.mode to skip this chart entirely -mode: full - -# --- Cluster Name (injected into Prometheus external_labels) --- -clusterName: countly-local - -# --- Cross-Namespace References --- -countlyNamespace: countly -clickhouseNamespace: clickhouse -mongodbNamespace: mongodb -kafkaNamespace: kafka -ingressNamespace: ingress-nginx -certManagerNamespace: cert-manager -clickhouseOperatorNamespace: clickhouse-operator-system - -# --- NGINX Ingress Controller Scrape Configuration --- -nginxIngress: - podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" - metricsPort: "9113" - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -# ============================================================================= -# Per-Signal Configuration -# ============================================================================= - -# --- Metrics --- -metrics: - enabled: true - sampling: - interval: "15s" # Global Prometheus scrape_interval - -# --- Traces --- -traces: - enabled: true - sampling: - strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased - ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased - tailSampling: # Only used when strategy == TailBased - waitDuration: "10s" - numTraces: 50000 - policies: - keepErrors: true - latencyThresholdMs: 2000 - baselineRatio: 0.1 - -# --- Logs --- -logs: - enabled: true - sampling: - enabled: false - dropRate: 
0 # 0.0-1.0, fraction of logs to drop - -# --- Profiling --- -profiling: - enabled: true - sampling: - rate: "100" # Advisory — used in NOTES.txt for SDK config - -# ============================================================================= -# Prometheus -# ============================================================================= -prometheus: - image: - repository: prom/prometheus - tag: "v3.10.0" - retention: - time: "30d" - size: "50GB" - storage: - size: 100Gi - storageClass: "" - resources: - requests: - cpu: "2" - memory: "3Gi" - limits: - cpu: "2" - memory: "4Gi" - extraArgs: [] - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. https://prom.corp.com/api/v1/write) - -# ============================================================================= -# Loki -# ============================================================================= -loki: - image: - repository: grafana/loki - tag: "3.6.7" - retention: "30d" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure - size: 100Gi - storageClass: "" - # Object storage settings (only used when backend != filesystem) - bucket: "" # Bucket/container name (REQUIRED for object backends) - endpoint: "" # Custom endpoint (e.g. 
MinIO: http://minio:9000) - region: "" # Cloud region (S3) - insecure: false # Use HTTP instead of HTTPS - forcePathStyle: false # S3 path-style access (required for MinIO) - # Credential file secret (for GCS JSON key files) - existingSecret: "" # K8s Secret name to mount - secretKey: "key.json" # Key within the Secret - secretMountPath: "/var/secrets/storage" - # Env-based credentials (for AWS access keys, Azure account keys) - envFromSecret: "" # K8s Secret name to inject as env vars - # Provider-specific passthrough (rendered directly into provider block) - config: {} - # - # --- Object storage examples (apply to loki, tempo, and pyroscope) --- - # - # --- AWS S3 example --- - # backend: s3 - # s3: - # bucket: my-loki-data - # region: us-east-1 - # endpoint: "" - # insecure: false - # forcePathStyle: false - # credentials: - # source: envFromSecret - # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY - # - # --- GCS example --- - # backend: gcs - # gcs: - # bucket: my-loki-data - # credentials: - # source: existingSecret - # existingSecret: loki-gcs-key - # secretKey: "key.json" - # - # --- MinIO example --- - # backend: s3 - # s3: - # bucket: loki - # endpoint: minio.storage.svc.cluster.local:9000 - # insecure: true - # forcePathStyle: true - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - config: - maxStreamsPerUser: 30000 - maxLineSize: 256000 - ingestionRateMb: 64 - ingestionBurstSizeMb: 128 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - pushUrl: "" # Full Loki push URL (e.g. 
https://loki.corp.com/loki/api/v1/push) - -# ============================================================================= -# Tempo -# ============================================================================= -tempo: - image: - repository: grafana/tempo - tag: "2.10.1" - retention: "12h" - storage: - backend: "local" # local | s3 | gcs | azure - size: 150Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "3" - memory: "6Gi" - limits: - cpu: "4" - memory: "10Gi" - config: - ingestionRateLimitBytes: 100000000 - ingestionBurstSizeBytes: 150000000 - maxTracesPerUser: 50000 - maxBytesPerTrace: 5000000 - maxRecvMsgSizeMiB: 16 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. tempo.corp.com:4317) - otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) - -# ============================================================================= -# Pyroscope -# ============================================================================= -pyroscope: - image: - repository: grafana/pyroscope - tag: "1.18.1" - retention: "72h" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure | swift - size: 20Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - ingestUrl: "" # Pyroscope ingest URL (e.g. 
https://pyroscope.corp.com) - -# ============================================================================= -# Grafana -# ============================================================================= -grafana: - enabled: true # Only deployed when mode == "full" - image: - repository: grafana/grafana - tag: "12.4.0" - admin: - existingSecret: "" # Use an existing Secret for admin credentials - userKey: "admin-user" - passwordKey: "admin-password" - persistence: - enabled: false # Ephemeral by default (declarative config, no state to lose) - size: 10Gi - storageClass: "" - resources: - requests: - cpu: "1" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - plugins: - install: "grafana-pyroscope-datasource" - featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" - dashboards: - enabled: true - overview: true - platform: true - countly: true - data: true - edge: true - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - url: "" # External Grafana URL (for NOTES.txt only) - -# ============================================================================= -# Alloy (DaemonSet — log collection) -# ============================================================================= -alloy: - image: - repository: grafana/alloy - tag: "v1.13.2" - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - scheduling: - nodeSelector: - kubernetes.io/os: linux - tolerations: [] - -# ============================================================================= -# Alloy-OTLP (Deployment — OTLP traces + profiling receive) -# ============================================================================= -alloyOtlp: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - memoryLimiter: - limit: "1600MiB" # Must be < resources.limits.memory - spikeLimit: 
"400MiB" - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# Alloy-Metrics (Deployment — ALL Prometheus scraping) -# ============================================================================= -alloyMetrics: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "512Mi" - limits: - cpu: "500m" - memory: "512Mi" - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# kube-state-metrics -# ============================================================================= -kubeStateMetrics: - enabled: true - image: - repository: registry.k8s.io/kube-state-metrics/kube-state-metrics - tag: "v2.18.0" - resources: - requests: - cpu: "10m" - memory: "32Mi" - limits: - cpu: "100m" - memory: "256Mi" - namespaces: - - countly - - observability - - ingress-nginx - - kube-system - - clickhouse - - mongodb - - kafka - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# node-exporter -# ============================================================================= -nodeExporter: - enabled: true - image: - repository: prom/node-exporter - tag: "v1.10.2" - resources: - requests: - cpu: "100m" - memory: "180Mi" - limits: - cpu: "250m" - memory: "300Mi" - -# ============================================================================= -# Ingress (for Grafana) -# ============================================================================= -ingress: - enabled: false - className: nginx - annotations: {} - hosts: - - host: obs.example.com - tls: [] - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: 
- enabled: false - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 4318 - # protocol: TCP diff --git a/environments/argo2/secrets-clickhouse.yaml b/environments/argo2/secrets-clickhouse.yaml deleted file mode 100644 index 9f97b3e..0000000 --- a/environments/argo2/secrets-clickhouse.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# ClickHouse direct secrets for argo2 -auth: - defaultUserPassword: - password: "argo2-clickhouse-2026" diff --git a/environments/argo2/secrets-countly.yaml b/environments/argo2/secrets-countly.yaml deleted file mode 100644 index 07a08be..0000000 --- a/environments/argo2/secrets-countly.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Countly direct secrets for argo2 -secrets: - mode: values - common: - encryptionReportsKey: "argo2-reports-key-2026" - webSessionSecret: "argo2-web-session-2026" - passwordSecret: "argo2-password-secret-2026" - clickhouse: - username: "default" - password: "argo2-clickhouse-2026" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - mongodb: - password: "argo2-mongo-2026" diff --git a/environments/argo2/secrets-kafka.yaml b/environments/argo2/secrets-kafka.yaml deleted file mode 100644 index f6e2e07..0000000 --- a/environments/argo2/secrets-kafka.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Kafka direct secrets for argo2 -kafkaConnect: - clickhouse: - password: "argo2-clickhouse-2026" diff --git a/environments/argo2/secrets-migration.yaml b/environments/argo2/secrets-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/argo2/secrets-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/argo2/secrets-mongodb.yaml b/environments/argo2/secrets-mongodb.yaml deleted file mode 100644 index 85a0dd4..0000000 --- a/environments/argo2/secrets-mongodb.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# MongoDB direct secrets for argo2 -users: - app: - password: "argo2-mongo-2026" - metrics: - enabled: true - password: "argo2-metrics-2026" diff --git a/environments/argo2/secrets-observability.yaml b/environments/argo2/secrets-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/argo2/secrets-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/argo2/secrets.example.yaml b/environments/argo2/secrets.example.yaml deleted file mode 100644 index 282eb0d..0000000 --- a/environments/argo2/secrets.example.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). 
-# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//secrets-countly.yaml) --- -secrets: - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- -users: - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//secrets-kafka.yaml) --- -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" diff --git a/environments/argo2/secrets.sops.example.yaml b/environments/argo2/secrets.sops.example.yaml deleted file mode 100644 index 9b652d1..0000000 --- a/environments/argo2/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//secrets-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//secrets-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as 
secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/environments/argo3/README.md b/environments/argo3/README.md deleted file mode 100644 index 12b374b..0000000 --- a/environments/argo3/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - -3. Fill in required secrets in the chart-specific files: - - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `clickhouse.yaml` → `auth.defaultUserPassword.password` - - `kafka.yaml` → `kafkaConnect.clickhouse.password` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. 
- -For production, choose one of: -- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) -- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `secrets-mongodb.yaml` | MongoDB user passwords | -| `secrets-clickhouse.yaml` | ClickHouse auth password | -| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | -| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/argo3/clickhouse.yaml b/environments/argo3/clickhouse.yaml deleted file mode 100644 index d7899d3..0000000 --- a/environments/argo3/clickhouse.yaml +++ /dev/null @@ -1,203 +0,0 @@ -# ============================================================================= -# ClickHouse Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-clickhouse/values.yaml 
is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Operator API Version --- -clickhouseOperator: - apiVersion: clickhouse.com/v1alpha1 - -# ============================================================================= -# Cluster Topology -# ============================================================================= -version: "26.2" -shards: 1 -replicas: 2 - -# ============================================================================= -# Images -# ============================================================================= -image: - server: clickhouse/clickhouse-server - keeper: clickhouse/clickhouse-keeper - -# ============================================================================= -# Database -# ============================================================================= -database: countly_drill - -# ============================================================================= -# Authentication -# ============================================================================= -auth: - # --- Default User Password --- - defaultUserPassword: - existingSecret: "" # Use an existing secret instead of creating one - secretName: clickhouse-default-password - key: password - password: "" # REQUIRED: ClickHouse default user password - - # --- Admin User (optional, separate from default) --- - adminUser: - enabled: false - # Precomputed SHA256 hex of the admin password (64 hex chars). 
- # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 - passwordSha256Hex: "" - -# ============================================================================= -# OpenTelemetry Server-Side Tracing -# ============================================================================= -# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for -# queries arriving with W3C traceparent headers. -opentelemetry: - enabled: false - spanLog: - ttlDays: 7 - flushIntervalMs: 1000 - -# ============================================================================= -# Server -# ============================================================================= -server: - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" - size: 50Gi - - settings: - maxConnections: 4096 - extraConfig: "" # Raw XML injected into server config - extraUsersConfig: "" # Raw XML injected into users config - prometheus: - enabled: true - port: 9363 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - -# ============================================================================= -# Keeper (ClickHouse Keeper for replication coordination) -# ============================================================================= -keeper: - replicas: 1 - - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "250m" - memory: "512Mi" - limits: - cpu: "500m" - memory: "1Gi" - - persistence: - storageClass: "" - size: 5Gi - - settings: - prometheus: - enabled: true - port: 9090 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - 
tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - -# ============================================================================= -# Pod Disruption Budgets -# ============================================================================= -podDisruptionBudget: - server: - enabled: false - maxUnavailable: 1 - keeper: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - - kafka - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-analytics-namespace - # ports: - # - port: 8123 - # protocol: TCP - -# ============================================================================= -# Service Monitor (Prometheus Operator CRD) -# ============================================================================= -serviceMonitor: - enabled: false - interval: "15s" - serviceType: headless # headless = per-pod scraping, clusterIP = any-pod - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall diff --git a/environments/argo3/countly-tls.env b/environments/argo3/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/argo3/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No 
newline at end of file diff --git a/environments/argo3/countly.yaml b/environments/argo3/countly.yaml deleted file mode 100644 index ccf54bb..0000000 --- a/environments/argo3/countly.yaml +++ /dev/null @@ -1,570 +0,0 @@ -# ============================================================================= -# Countly Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Service Account --- -serviceAccount: - create: true - name: "" # Auto-derived from release name if empty - annotations: {} - -# --- Image --- -image: - repository: gcr.io/countly-dev-313620/countly-unified - digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" - tag: "26.01" # Fallback when digest is empty - pullPolicy: IfNotPresent - -# --- Cross-Namespace References --- -clickhouseNamespace: clickhouse -kafkaNamespace: kafka -mongodbNamespace: mongodb - -# ============================================================================= -# Component: API -# ============================================================================= -api: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:api"] - port: 3001 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "1" - memory: "4Gi" - hpa: - enabled: true - 
minReplicas: 1 - maxReplicas: 6 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 # 1-100, only used with type=preferred - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Frontend -# ============================================================================= -frontend: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:frontend"] - port: 6001 - healthCheck: - path: /ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "2.5Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - averageUtilization: 80 - memory: {} - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Ingestor -# ============================================================================= -ingestor: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:ingestor"] - port: 3010 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 12 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 75 - 
behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Aggregator -# ============================================================================= -aggregator: - enabled: true - replicaCount: 4 - command: ["npm", "run", "start:aggregator"] - port: 0 # No HTTP port exposed - healthCheck: {} # No HTTP health check (no port) - terminationGracePeriodSeconds: 60 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "2" - memory: "4Gi" - hpa: - enabled: true - minReplicas: 4 - maxReplicas: 8 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 65 - behavior: - scaleUp: - stabilizationWindowSeconds: 0 - policies: - - type: Percent - value: 100 - periodSeconds: 15 - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Percent - value: 10 - periodSeconds: 60 - pdb: - enabled: true - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Job Server -# ============================================================================= -jobserver: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:jobserver"] - port: 3020 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - 
averageUtilization: 80 - memory: - averageUtilization: 85 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: false - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Configuration (ConfigMaps) -# ============================================================================= -config: - # --- Common (shared by all components) --- - common: - NODE_ENV: production - COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" - COUNTLY_CONFIG__FILESTORAGE: gridfs - COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse - COUNTLY_CONFIG__SHARED_CONNECTION: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' - COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" - COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' - COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" - - # --- API --- - api: - COUNTLY_CONTAINER: api - COUNTLY_CONFIG__API_PORT: "3001" - COUNTLY_CONFIG__API_HOST: "0.0.0.0" - COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" - COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB - COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms - - # --- Frontend --- - frontend: - COUNTLY_CONTAINER: frontend - COUNTLY_CONFIG__WEB_PORT: "6001" - 
COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" - COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" - COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms - - # --- Ingestor --- - ingestor: - COUNTLY_CONTAINER: ingestor - COUNTLY_CONFIG__INGESTOR_PORT: "3010" - COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" - - # --- Aggregator --- - aggregator: - COUNTLY_CONTAINER: aggregator - UV_THREADPOOL_SIZE: "6" - - # --- Job Server --- - jobserver: - COUNTLY_CONTAINER: jobserver - COUNTLY_CONFIG__JOBSERVER_PORT: "3020" - COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" - - # --- ClickHouse Connection --- - clickhouse: - COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" - COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms - COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" - COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: "true" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" - 
COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" - - # --- Kafka Connection --- - kafka: - COUNTLY_CONFIG__KAFKA_ENABLED: "true" - COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events - COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka - COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" - COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" - COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms - COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" - COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app - COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" - COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" - COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" - COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" - COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" - COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest - COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" - COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" - COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" - - # --- OpenTelemetry --- - otel: - OTEL_ENABLED: "false" - OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" - OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" - OTEL_TRACES_SAMPLER: "parentbased_traceidratio" - OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample - PYROSCOPE_ENABLED: "false" - -# --- Node.js Options (injected into configmap per component) --- -nodeOptions: - api: "--max-old-space-size=3072 --max-semi-space-size=256" - frontend: "--max-old-space-size=2048" - ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" - 
aggregator: "--max-old-space-size=3072 --max-semi-space-size=128" - jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" - -# ============================================================================= -# Backing Service Modes -# ============================================================================= -# When mode=external, the corresponding chart is not deployed and connection -# details below are used instead. - -backingServices: - mongodb: - mode: bundled # bundled | external - host: "" - port: "27017" - connectionString: "" # If set, used as-is (bypasses host/port/user/pass) - username: "app" - password: "" - database: "admin" - replicaSet: "" - existingSecret: "" - # --- External MongoDB Atlas example --- - # mode: external - # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" - - clickhouse: - mode: bundled # bundled | external - host: "" - port: "8123" - tls: "false" - username: "default" - password: "" - database: "countly_drill" - existingSecret: "" - # --- External ClickHouse Cloud example --- - # mode: external - # host: "abc123.us-east-1.aws.clickhouse.cloud" - # port: "8443" - # tls: "true" - - kafka: - mode: bundled # bundled | external - brokers: "" # Comma-separated broker list - securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL - saslMechanism: "" - saslUsername: "" - saslPassword: "" - existingSecret: "" - # --- External Confluent Cloud example --- - # mode: external - # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" - # securityProtocol: "SASL_SSL" - # saslMechanism: "PLAIN" - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - mode: values # values | existingSecret | externalSecret - keep: true # Retain secrets on helm uninstall - rotationId: "" # Change to force secret re-creation - - common: - 
existingSecret: "" - encryptionReportsKey: "" # REQUIRED: min 8 chars - webSessionSecret: "" # REQUIRED: min 8 chars - passwordSecret: "" # REQUIRED: min 8 chars - - clickhouse: - existingSecret: "" - username: "" - password: "" - database: "" - - kafka: - existingSecret: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - - mongodb: - existingSecret: "" - key: "connectionString.standard" # Key within the secret to read - password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) - - # --- ExternalSecret configuration (used only when mode=externalSecret) --- - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: "" - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: "" - webSessionSecret: "" - passwordSecret: "" - clickhouse: - url: "" - username: "" - password: "" - database: "" - kafka: - brokers: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - mongodb: - connectionString: "" - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - ingressNamespaceSelector: - kubernetes.io/metadata.name: ingress-nginx - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-custom-namespace - # ports: - # - port: 3001 - # protocol: TCP - -# ============================================================================= -# Ingress -# ============================================================================= -ingress: - enabled: true - className: nginx - annotations: - # F5 NGINX Ingress Controller (OSS) annotations - nginx.org/client-max-body-size: "50m" - nginx.org/proxy-buffering: "True" - nginx.org/proxy-buffer-size: "256k" - nginx.org/proxy-buffers: "16 256k" - nginx.org/proxy-busy-buffers-size: "512k" 
- nginx.org/proxy-max-temp-file-size: "2048m" - nginx.org/client-body-buffer-size: "2m" - nginx.org/proxy-connect-timeout: "60s" - nginx.org/proxy-read-timeout: "120s" - nginx.org/proxy-send-timeout: "120s" - nginx.org/keepalive: "256" - nginx.org/server-snippets: | - otel_trace on; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Scheme $scheme; - proxy_set_header Connection ""; - proxy_set_header X-Request-ID $request_id; - proxy_set_header X-Request-Start $msec; - # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) - client_header_timeout 30s; - nginx.org/location-snippets: | - proxy_request_buffering on; - proxy_next_upstream error timeout http_502 http_503 http_504; - proxy_next_upstream_timeout 30s; - proxy_next_upstream_tries 3; - proxy_temp_file_write_size 1m; - client_body_timeout 120s; - hostname: "" # Set via argocd/customers/.yaml - tls: - # TLS mode: letsencrypt | existingSecret | selfSigned | http - # http: No TLS - # letsencrypt: cert-manager + Let's Encrypt (recommended for production) - # existingSecret: Bring your own TLS secret - # selfSigned: cert-manager self-signed CA (for development) - mode: "" # Set via argocd/customers/.yaml - clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt - secretName: "" # Auto-derived if empty: -tls - selfSigned: - issuerName: "" # Auto-derived if empty: -ca-issuer - caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/argo3/external-secrets.example.yaml b/environments/argo3/external-secrets.example.yaml deleted file mode 100644 index 7bf93ef..0000000 --- a/environments/argo3/external-secrets.example.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# 
============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in environments//countly.yaml: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: my-secret-store -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "countly/encryption-reports-key" -# webSessionSecret: "countly/web-session-secret" -# passwordSecret: "countly/password-secret" -# clickhouse: -# url: "countly/clickhouse-url" -# username: "countly/clickhouse-username" -# password: "countly/clickhouse-password" -# database: "countly/clickhouse-database" -# kafka: -# brokers: "countly/kafka-brokers" -# securityProtocol: "countly/kafka-security-protocol" -# mongodb: -# connectionString: "countly/mongodb-connection-string" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. 
diff --git a/environments/argo3/global.yaml b/environments/argo3/global.yaml deleted file mode 100644 index f0d6f8a..0000000 --- a/environments/argo3/global.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - storageClass: "" - imagePullSecrets: [] - -ingress: - hostname: argo3.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/argo3/kafka.yaml b/environments/argo3/kafka.yaml deleted file mode 100644 index 0a9b864..0000000 --- a/environments/argo3/kafka.yaml +++ /dev/null @@ -1,384 +0,0 @@ -# ============================================================================= -# Kafka Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-kafka/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. 
-# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Strimzi Operator API Version --- -strimzi: - apiVersion: kafka.strimzi.io/v1 - -# --- Kafka Version --- -version: "4.2.0" - -# ============================================================================= -# Brokers -# ============================================================================= -brokers: - replicas: 3 - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "1" - memory: "4Gi" - jvmOptions: - xms: "2g" - xmx: "2g" - - # --- Persistence --- - persistence: - volumes: - - id: 0 - size: 100Gi - storageClass: "" - deleteClaim: false # Delete PVC when broker is removed - - # --- Broker Config --- - config: - default.replication.factor: 2 - min.insync.replicas: 2 - log.retention.hours: 168 # 7 days - log.segment.bytes: "1073741824" # 1 GiB - compression.type: lz4 - auto.create.topics.enable: false - offsets.topic.replication.factor: 2 - num.partitions: 24 - transaction.state.log.replication.factor: 2 - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Controllers (KRaft) -# ============================================================================= -controllers: - replicas: 3 - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - - persistence: - size: 20Gi - storageClass: "" - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - 
topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Listeners -# ============================================================================= -listeners: - - name: internal - port: 9092 - type: internal - tls: false - -# ============================================================================= -# Cruise Control -# ============================================================================= -cruiseControl: - enabled: true - resources: - requests: - cpu: "1" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - jvmOptions: - xms: "1g" - xmx: "2g" - autoRebalance: - - mode: add-brokers - - mode: remove-brokers - -# ============================================================================= -# Kafka Connect (ClickHouse Sink) -# ============================================================================= -kafkaConnect: - enabled: true - name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" - replicas: 2 - bootstrapServers: "" # Auto-derived from cluster if empty - - resources: - requests: - cpu: "2" - memory: "8Gi" - limits: - cpu: "2" - memory: "8Gi" - jvmOptions: - xms: "5g" - xmx: "5g" - - # --- Worker Configuration --- - workerConfig: - group.id: connect-ch - config.storage.topic: connect_ch_configs - offset.storage.topic: connect_ch_offsets - status.storage.topic: connect_ch_status - config.storage.replication.factor: 2 - offset.storage.replication.factor: 2 - status.storage.replication.factor: 2 - offset.storage.partitions: 25 - status.storage.partitions: 5 - key.converter: org.apache.kafka.connect.storage.StringConverter - value.converter: org.apache.kafka.connect.json.JsonConverter - value.converter.schemas.enable: "false" - connector.client.config.override.policy: All - config.providers: env - config.providers.env.class: 
org.apache.kafka.common.config.provider.EnvVarConfigProvider - - # --- ClickHouse Connection (for the sink connector) --- - clickhouse: - existingSecret: "" - secretName: clickhouse-auth - host: "" # Auto-derived from clickhouseNamespace if empty - port: "8123" - ssl: "false" - database: "countly_drill" - username: "default" - password: "" # REQUIRED: must match ClickHouse default user password - - # --- Environment Variables (injected into Connect pods) --- - env: - EXACTLY_ONCE: "false" - ERRORS_RETRY_TIMEOUT: "300" - ERRORS_TOLERANCE: "none" # none | all - CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" - BYPASS_ROW_BINARY: "false" - TABLE_REFRESH_INTERVAL: "300" # seconds - KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter - VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter - VALUE_CONVERTER_SCHEMAS_ENABLE: "false" - KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 32 MiB - KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" - KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" - KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB - KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB - KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" - KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" - KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" - KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" - - # --- HPA --- - hpa: - enabled: false - minReplicas: 1 - maxReplicas: 3 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: - scaleUp: - stabilizationWindowSeconds: 120 - policies: - - type: Percent - value: 50 - periodSeconds: 120 - - type: Pods - value: 2 - periodSeconds: 120 - 
selectPolicy: Min - scaleDown: - stabilizationWindowSeconds: 600 - policies: - - type: Percent - value: 25 - periodSeconds: 300 - - type: Pods - value: 1 - periodSeconds: 300 - selectPolicy: Min - - # --- OpenTelemetry Java Agent --- - # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. - # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer - # and outbound HTTP (ClickHouse sink) span creation. - otel: - enabled: false - serviceName: "kafka-connect" - exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" - exporterProtocol: "grpc" - sampler: "parentbased_traceidratio" - samplerArg: "1.0" - resourceAttributes: "" # e.g. "deployment.environment=production,k8s.cluster.name=my-cluster" - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- Connectors --- - connectors: - - name: ch-sink-drill-events - enabled: true - state: running # running | paused | stopped - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - 
consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - healthcheck.interval: "10000" - dlq: {} # Dead-letter queue config (empty = disabled) - -# ============================================================================= -# Metrics -# ============================================================================= -metrics: - enabled: true - -# --- Cross-Namespace Reference --- -clickhouseNamespace: clickhouse - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 9092 - # protocol: TCP - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain 
secrets on helm uninstall -# Kafka overrides for argo3 -# -# Migration is disabled for this customer, so keep the drill sink connector off. - -kafkaConnect: - connectors: - - name: ch-sink-drill-events - enabled: false - state: running - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - 
healthcheck.interval: "10000" - dlq: {} diff --git a/environments/argo3/migration.yaml b/environments/argo3/migration.yaml deleted file mode 100644 index 6fa760c..0000000 --- a/environments/argo3/migration.yaml +++ /dev/null @@ -1,3 +0,0 @@ -# Migration overrides for optional countly-migration app. -# Enable per customer by setting `migration: enabled` in argocd/customers/.yaml -# and then filling this file with environment-specific overrides as needed. diff --git a/environments/argo3/mongodb.yaml b/environments/argo3/mongodb.yaml deleted file mode 100644 index 31f230e..0000000 --- a/environments/argo3/mongodb.yaml +++ /dev/null @@ -1,144 +0,0 @@ -# ============================================================================= -# MongoDB Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-mongodb/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. 
-# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# ============================================================================= -# MongoDB Server -# ============================================================================= -mongodb: - version: "8.2.5" - members: 2 # Replica set member count - - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" # Overrides global.storageClass for MongoDB PVCs - size: 100Gi - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- TLS --- - tls: - enabled: false - -# ============================================================================= -# Users -# ============================================================================= -users: - # --- Application User --- - app: - name: app - database: admin - roles: - - name: readWriteAnyDatabase - db: admin - - name: dbAdmin - db: admin - passwordSecretName: app-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - - # --- Metrics Exporter User --- - metrics: - enabled: true - name: metrics - database: admin - roles: - - name: clusterMonitor - db: admin - - name: read - db: local - passwordSecretName: metrics-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - -# ============================================================================= -# Prometheus Exporter -# ============================================================================= 
-exporter: - enabled: true - image: percona/mongodb_exporter:0.40.0 - port: 9216 - resources: - requests: - cpu: "50m" - memory: "64Mi" - limits: - cpu: "200m" - memory: "256Mi" - service: - enabled: true - args: - - --collect-all - - --collector.diagnosticdata - - --collector.replicasetstatus - - --collector.dbstats - - --collector.topmetrics - - --collector.indexstats - - --collector.collstats - -# ============================================================================= -# Pod Disruption Budget -# ============================================================================= -podDisruptionBudget: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 27017 - # protocol: TCP - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall diff --git a/environments/argo3/observability.yaml b/environments/argo3/observability.yaml deleted file mode 100644 index 79a4b39..0000000 --- a/environments/argo3/observability.yaml +++ /dev/null @@ -1,437 +0,0 @@ -# ============================================================================= -# Observability Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-observability/values.yaml is listed here. 
-# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Deployment Mode --- -# full — All backends + Grafana in-cluster -# hybrid — All backends in-cluster, no Grafana (use external Grafana) -# external — Collectors only, forward to external endpoints -# disabled — Set in global.yaml observability.mode to skip this chart entirely -mode: full - -# --- Cluster Name (injected into Prometheus external_labels) --- -clusterName: countly-local - -# --- Cross-Namespace References --- -countlyNamespace: countly -clickhouseNamespace: clickhouse -mongodbNamespace: mongodb -kafkaNamespace: kafka -ingressNamespace: ingress-nginx -certManagerNamespace: cert-manager -clickhouseOperatorNamespace: clickhouse-operator-system - -# --- NGINX Ingress Controller Scrape Configuration --- -nginxIngress: - podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" - metricsPort: "9113" - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -# ============================================================================= -# Per-Signal Configuration -# ============================================================================= - -# --- Metrics --- -metrics: - enabled: true - sampling: - interval: "15s" # Global Prometheus scrape_interval - -# --- Traces --- -traces: - enabled: true - sampling: - strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased - ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased - tailSampling: # Only used when strategy == TailBased - waitDuration: "10s" - numTraces: 50000 - policies: - keepErrors: true - latencyThresholdMs: 2000 - baselineRatio: 0.1 - -# --- Logs --- -logs: - enabled: true - sampling: - enabled: false - dropRate: 
0 # 0.0-1.0, fraction of logs to drop - -# --- Profiling --- -profiling: - enabled: true - sampling: - rate: "100" # Advisory — used in NOTES.txt for SDK config - -# ============================================================================= -# Prometheus -# ============================================================================= -prometheus: - image: - repository: prom/prometheus - tag: "v3.10.0" - retention: - time: "30d" - size: "50GB" - storage: - size: 100Gi - storageClass: "" - resources: - requests: - cpu: "2" - memory: "3Gi" - limits: - cpu: "2" - memory: "4Gi" - extraArgs: [] - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. https://prom.corp.com/api/v1/write) - -# ============================================================================= -# Loki -# ============================================================================= -loki: - image: - repository: grafana/loki - tag: "3.6.7" - retention: "30d" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure - size: 100Gi - storageClass: "" - # Object storage settings (only used when backend != filesystem) - bucket: "" # Bucket/container name (REQUIRED for object backends) - endpoint: "" # Custom endpoint (e.g. 
MinIO: http://minio:9000) - region: "" # Cloud region (S3) - insecure: false # Use HTTP instead of HTTPS - forcePathStyle: false # S3 path-style access (required for MinIO) - # Credential file secret (for GCS JSON key files) - existingSecret: "" # K8s Secret name to mount - secretKey: "key.json" # Key within the Secret - secretMountPath: "/var/secrets/storage" - # Env-based credentials (for AWS access keys, Azure account keys) - envFromSecret: "" # K8s Secret name to inject as env vars - # Provider-specific passthrough (rendered directly into provider block) - config: {} - # - # --- Object storage examples (apply to loki, tempo, and pyroscope) --- - # - # --- AWS S3 example --- - # backend: s3 - # s3: - # bucket: my-loki-data - # region: us-east-1 - # endpoint: "" - # insecure: false - # forcePathStyle: false - # credentials: - # source: envFromSecret - # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY - # - # --- GCS example --- - # backend: gcs - # gcs: - # bucket: my-loki-data - # credentials: - # source: existingSecret - # existingSecret: loki-gcs-key - # secretKey: "key.json" - # - # --- MinIO example --- - # backend: s3 - # s3: - # bucket: loki - # endpoint: minio.storage.svc.cluster.local:9000 - # insecure: true - # forcePathStyle: true - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - config: - maxStreamsPerUser: 30000 - maxLineSize: 256000 - ingestionRateMb: 64 - ingestionBurstSizeMb: 128 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - pushUrl: "" # Full Loki push URL (e.g. 
https://loki.corp.com/loki/api/v1/push) - -# ============================================================================= -# Tempo -# ============================================================================= -tempo: - image: - repository: grafana/tempo - tag: "2.10.1" - retention: "12h" - storage: - backend: "local" # local | s3 | gcs | azure - size: 150Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "3" - memory: "6Gi" - limits: - cpu: "4" - memory: "10Gi" - config: - ingestionRateLimitBytes: 100000000 - ingestionBurstSizeBytes: 150000000 - maxTracesPerUser: 50000 - maxBytesPerTrace: 5000000 - maxRecvMsgSizeMiB: 16 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. tempo.corp.com:4317) - otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) - -# ============================================================================= -# Pyroscope -# ============================================================================= -pyroscope: - image: - repository: grafana/pyroscope - tag: "1.18.1" - retention: "72h" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure | swift - size: 20Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - ingestUrl: "" # Pyroscope ingest URL (e.g. 
https://pyroscope.corp.com) - -# ============================================================================= -# Grafana -# ============================================================================= -grafana: - enabled: true # Only deployed when mode == "full" - image: - repository: grafana/grafana - tag: "12.4.0" - admin: - existingSecret: "" # Use an existing Secret for admin credentials - userKey: "admin-user" - passwordKey: "admin-password" - persistence: - enabled: false # Ephemeral by default (declarative config, no state to lose) - size: 10Gi - storageClass: "" - resources: - requests: - cpu: "1" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - plugins: - install: "grafana-pyroscope-datasource" - featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" - dashboards: - enabled: true - overview: true - platform: true - countly: true - data: true - edge: true - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - url: "" # External Grafana URL (for NOTES.txt only) - -# ============================================================================= -# Alloy (DaemonSet — log collection) -# ============================================================================= -alloy: - image: - repository: grafana/alloy - tag: "v1.13.2" - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - scheduling: - nodeSelector: - kubernetes.io/os: linux - tolerations: [] - -# ============================================================================= -# Alloy-OTLP (Deployment — OTLP traces + profiling receive) -# ============================================================================= -alloyOtlp: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - memoryLimiter: - limit: "1600MiB" # Must be < resources.limits.memory - spikeLimit: 
"400MiB" - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# Alloy-Metrics (Deployment — ALL Prometheus scraping) -# ============================================================================= -alloyMetrics: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "512Mi" - limits: - cpu: "500m" - memory: "512Mi" - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# kube-state-metrics -# ============================================================================= -kubeStateMetrics: - enabled: true - image: - repository: registry.k8s.io/kube-state-metrics/kube-state-metrics - tag: "v2.18.0" - resources: - requests: - cpu: "10m" - memory: "32Mi" - limits: - cpu: "100m" - memory: "256Mi" - namespaces: - - countly - - observability - - ingress-nginx - - kube-system - - clickhouse - - mongodb - - kafka - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# node-exporter -# ============================================================================= -nodeExporter: - enabled: true - image: - repository: prom/node-exporter - tag: "v1.10.2" - resources: - requests: - cpu: "100m" - memory: "180Mi" - limits: - cpu: "250m" - memory: "300Mi" - -# ============================================================================= -# Ingress (for Grafana) -# ============================================================================= -ingress: - enabled: false - className: nginx - annotations: {} - hosts: - - host: obs.example.com - tls: [] - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: 
- enabled: false - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 4318 - # protocol: TCP diff --git a/environments/argo3/secrets-clickhouse.yaml b/environments/argo3/secrets-clickhouse.yaml deleted file mode 100644 index d74b27b..0000000 --- a/environments/argo3/secrets-clickhouse.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# ClickHouse direct secrets for argo3 -auth: - defaultUserPassword: - password: "argo3-clickhouse-2026" diff --git a/environments/argo3/secrets-countly.yaml b/environments/argo3/secrets-countly.yaml deleted file mode 100644 index d4d41d5..0000000 --- a/environments/argo3/secrets-countly.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Countly direct secrets for argo3 -secrets: - mode: values - common: - encryptionReportsKey: "argo3-reports-key-2026" - webSessionSecret: "argo3-web-session-2026" - passwordSecret: "argo3-password-secret-2026" - clickhouse: - username: "default" - password: "argo3-clickhouse-2026" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - mongodb: - password: "argo3-mongo-2026" diff --git a/environments/argo3/secrets-kafka.yaml b/environments/argo3/secrets-kafka.yaml deleted file mode 100644 index 98980a7..0000000 --- a/environments/argo3/secrets-kafka.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# Kafka direct secrets for argo3 -kafkaConnect: - clickhouse: - password: "argo3-clickhouse-2026" diff --git a/environments/argo3/secrets-migration.yaml b/environments/argo3/secrets-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/argo3/secrets-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/argo3/secrets-mongodb.yaml b/environments/argo3/secrets-mongodb.yaml deleted file mode 100644 index 2e5a2c6..0000000 --- a/environments/argo3/secrets-mongodb.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# MongoDB direct secrets for argo3 -users: - app: - password: "argo3-mongo-2026" - metrics: - enabled: true - password: "argo3-metrics-2026" diff --git a/environments/argo3/secrets-observability.yaml b/environments/argo3/secrets-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/argo3/secrets-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/argo3/secrets.example.yaml b/environments/argo3/secrets.example.yaml deleted file mode 100644 index 282eb0d..0000000 --- a/environments/argo3/secrets.example.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). 
-# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//secrets-countly.yaml) --- -secrets: - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- -users: - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//secrets-kafka.yaml) --- -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" diff --git a/environments/argo3/secrets.sops.example.yaml b/environments/argo3/secrets.sops.example.yaml deleted file mode 100644 index 9b652d1..0000000 --- a/environments/argo3/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//secrets-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//secrets-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as 
secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/environments/helm-argo-test/clickhouse.yaml b/environments/helm-argo-test/clickhouse.yaml deleted file mode 100644 index e06126c..0000000 --- a/environments/helm-argo-test/clickhouse.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# ClickHouse overrides for helm-argo-test -# -# Runtime sizing comes from: -# - profiles/sizing/tier1/clickhouse.yaml -# - profiles/security/open/clickhouse.yaml diff --git a/environments/helm-argo-test/countly.yaml b/environments/helm-argo-test/countly.yaml deleted file mode 100644 index 45f8607..0000000 --- a/environments/helm-argo-test/countly.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Countly overrides for helm-argo-test -# -# Most runtime sizing comes from: -# - profiles/sizing/tier1/countly.yaml -# - profiles/tls/letsencrypt/countly.yaml -# - profiles/observability/disabled/countly.yaml -# - profiles/security/open/countly.yaml diff --git a/environments/helm-argo-test/global.yaml b/environments/helm-argo-test/global.yaml deleted file mode 100644 index b0d3ff6..0000000 --- a/environments/helm-argo-test/global.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - storageClass: "" - imagePullSecrets: [] - -ingress: - hostname: helm-argo-test.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/helm-argo-test/kafka.yaml b/environments/helm-argo-test/kafka.yaml 
deleted file mode 100644 index 8ade5b9..0000000 --- a/environments/helm-argo-test/kafka.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# Kafka overrides for helm-argo-test -# -# Runtime sizing comes from: -# - profiles/sizing/tier1/kafka.yaml -# - profiles/kafka-connect/balanced/kafka.yaml -# - profiles/observability/disabled/kafka.yaml -# - profiles/security/open/kafka.yaml - -kafkaConnect: - connectors: - - name: ch-sink-drill-events - enabled: false - state: running - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - 
consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - healthcheck.interval: "10000" - dlq: {} diff --git a/environments/helm-argo-test/migration.yaml b/environments/helm-argo-test/migration.yaml deleted file mode 100644 index 6cad86c..0000000 --- a/environments/helm-argo-test/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Migration is optional and disabled for helm-argo-test. diff --git a/environments/helm-argo-test/mongodb.yaml b/environments/helm-argo-test/mongodb.yaml deleted file mode 100644 index 4939769..0000000 --- a/environments/helm-argo-test/mongodb.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# MongoDB overrides for helm-argo-test -# -# Runtime sizing comes from: -# - profiles/sizing/tier1/mongodb.yaml -# - profiles/security/open/mongodb.yaml diff --git a/environments/helm-argo-test/observability.yaml b/environments/helm-argo-test/observability.yaml deleted file mode 100644 index 393fd39..0000000 --- a/environments/helm-argo-test/observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -mode: disabled -clusterName: helm-argo-test diff --git a/environments/helm-argo-test/secrets-clickhouse.yaml b/environments/helm-argo-test/secrets-clickhouse.yaml deleted file mode 100644 index 02f9fc6..0000000 --- a/environments/helm-argo-test/secrets-clickhouse.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# ClickHouse secrets — fill these in before first deploy. -auth: - defaultUserPassword: - password: "helm-argo-test-clickhouse-2026" - externalSecret: - enabled: false diff --git a/environments/helm-argo-test/secrets-countly.yaml b/environments/helm-argo-test/secrets-countly.yaml deleted file mode 100644 index eeafe3f..0000000 --- a/environments/helm-argo-test/secrets-countly.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Countly secrets — fill these in before first deploy. 
-# This file is intentionally using direct Helm values for initial setup. -secrets: - mode: values - common: - encryptionReportsKey: "helm-argo-test-reports-key-2026" - webSessionSecret: "helm-argo-test-web-session-2026" - passwordSecret: "helm-argo-test-password-secret-2026" - clickhouse: - username: "default" - password: "helm-argo-test-clickhouse-2026" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - mongodb: - password: "helm-argo-test-mongo-2026" diff --git a/environments/helm-argo-test/secrets-kafka.yaml b/environments/helm-argo-test/secrets-kafka.yaml deleted file mode 100644 index fb5a94a..0000000 --- a/environments/helm-argo-test/secrets-kafka.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Kafka secrets — fill these in before first deploy. -kafkaConnect: - clickhouse: - username: "default" - password: "helm-argo-test-clickhouse-2026" - externalSecret: - enabled: false diff --git a/environments/helm-argo-test/secrets-migration.yaml b/environments/helm-argo-test/secrets-migration.yaml deleted file mode 100644 index 6cad86c..0000000 --- a/environments/helm-argo-test/secrets-migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Migration is optional and disabled for helm-argo-test. diff --git a/environments/helm-argo-test/secrets-mongodb.yaml b/environments/helm-argo-test/secrets-mongodb.yaml deleted file mode 100644 index d2f4271..0000000 --- a/environments/helm-argo-test/secrets-mongodb.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# MongoDB secrets — fill these in before first deploy. 
-users: - app: - password: "helm-argo-test-mongo-2026" - createSecret: true - externalSecret: - enabled: false - metrics: - enabled: true - password: "helm-argo-test-metrics-2026" - createSecret: true - externalSecret: - enabled: false diff --git a/environments/helm-argo-test/secrets-observability.yaml b/environments/helm-argo-test/secrets-observability.yaml deleted file mode 100644 index b7ad3b1..0000000 --- a/environments/helm-argo-test/secrets-observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Observability is disabled for helm-argo-test. diff --git a/environments/helm-argocd/clickhouse.yaml b/environments/helm-argocd/clickhouse.yaml deleted file mode 100644 index 5dead21..0000000 --- a/environments/helm-argocd/clickhouse.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# ClickHouse overrides for helm-argocd -# -# Runtime sizing comes from: -# - profiles/sizing/production/clickhouse.yaml -# - profiles/security/open/clickhouse.yaml diff --git a/environments/helm-argocd/countly.yaml b/environments/helm-argocd/countly.yaml deleted file mode 100644 index 508c17a..0000000 --- a/environments/helm-argocd/countly.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Countly overrides for helm-argocd -# -# Most runtime sizing comes from: -# - profiles/sizing/production/countly.yaml -# - profiles/tls/letsencrypt/countly.yaml -# - profiles/observability/full/countly.yaml -# - profiles/security/open/countly.yaml diff --git a/environments/helm-argocd/global.yaml b/environments/helm-argocd/global.yaml deleted file mode 100644 index e6566f3..0000000 --- a/environments/helm-argocd/global.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: production - observability: full - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - storageClass: "" - imagePullSecrets: [] - 
-ingress: - hostname: helm-argocd.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/helm-argocd/kafka.yaml b/environments/helm-argocd/kafka.yaml deleted file mode 100644 index 02e5309..0000000 --- a/environments/helm-argocd/kafka.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Kafka overrides for helm-argocd -# -# Runtime sizing comes from: -# - profiles/sizing/production/kafka.yaml -# - profiles/kafka-connect/balanced/kafka.yaml -# - profiles/observability/full/kafka.yaml -# - profiles/security/open/kafka.yaml diff --git a/environments/helm-argocd/mongodb.yaml b/environments/helm-argocd/mongodb.yaml deleted file mode 100644 index 56dbbfb..0000000 --- a/environments/helm-argocd/mongodb.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# MongoDB overrides for helm-argocd -# -# Runtime sizing comes from: -# - profiles/sizing/production/mongodb.yaml -# - profiles/security/open/mongodb.yaml diff --git a/environments/helm-argocd/observability.yaml b/environments/helm-argocd/observability.yaml deleted file mode 100644 index 41ce526..0000000 --- a/environments/helm-argocd/observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -mode: full -clusterName: helm-argocd diff --git a/environments/helm-argocd/secrets-clickhouse.yaml b/environments/helm-argocd/secrets-clickhouse.yaml deleted file mode 100644 index f08f949..0000000 --- a/environments/helm-argocd/secrets-clickhouse.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# ClickHouse secrets — fill these in before first deploy. -auth: - defaultUserPassword: - password: "default123" - externalSecret: - enabled: false diff --git a/environments/helm-argocd/secrets-countly.yaml b/environments/helm-argocd/secrets-countly.yaml deleted file mode 100644 index ee72d2f..0000000 --- a/environments/helm-argocd/secrets-countly.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Countly secrets — fill these in before first deploy. 
-# This file is intentionally using direct Helm values for initial setup. -secrets: - mode: values - common: - encryptionReportsKey: "helm-argocd-reports-key-2026" - webSessionSecret: "helm-argocd-web-session-2026" - passwordSecret: "helm-argocd-password-secret-2026" - clickhouse: - username: "default" - password: "default123" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - mongodb: - password: "mongo123" diff --git a/environments/helm-argocd/secrets-kafka.yaml b/environments/helm-argocd/secrets-kafka.yaml deleted file mode 100644 index efb2f9d..0000000 --- a/environments/helm-argocd/secrets-kafka.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Kafka secrets — fill these in before first deploy. -kafkaConnect: - clickhouse: - username: "default" - password: "default123" - externalSecret: - enabled: false diff --git a/environments/helm-argocd/secrets-mongodb.yaml b/environments/helm-argocd/secrets-mongodb.yaml deleted file mode 100644 index f96763f..0000000 --- a/environments/helm-argocd/secrets-mongodb.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# MongoDB secrets — fill these in before first deploy. -users: - app: - password: "mongo123" - createSecret: true - externalSecret: - enabled: false - metrics: - enabled: true - password: "mongo-metrics123" - createSecret: true - externalSecret: - enabled: false diff --git a/environments/helm-argocd/secrets-observability.yaml b/environments/helm-argocd/secrets-observability.yaml deleted file mode 100644 index a1c2697..0000000 --- a/environments/helm-argocd/secrets-observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Observability secrets — none required for bundled mode by default. 
From 14d6025935dffe4cc4fc279eddc3b586cf919a5b Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 15:48:22 +0530 Subject: [PATCH 30/79] Simplify Argo CD customer onboarding guide --- argocd/README.md | 282 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 236 insertions(+), 46 deletions(-) diff --git a/argocd/README.md b/argocd/README.md index 11cabc8..31eacea 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -1,69 +1,97 @@ -# ArgoCD Bootstrap For Customer Deployments +# Argo CD Customer Deployment Guide -This folder bootstraps Countly for multiple customers using ArgoCD `ApplicationSet`. +This folder contains the GitOps setup used to deploy Countly to many customer clusters with Argo CD. -## What This Layout Does +The short version: -- `operators/` bootstraps required platform operators into the target cluster. -- `projects/` defines the shared ArgoCD `AppProject` used by customer apps. -- `customers/` contains one metadata file per customer/cluster. -- `applicationsets/` generates one ArgoCD `Application` per component per customer. -- `environments//` stores the Helm values used by those Applications. -- `root-application.yaml` creates one parent ArgoCD Application that syncs this whole `argocd/` folder. +1. Register the customer cluster in Argo CD. +2. Create a customer scaffold with the helper script. +3. Fill in the customer secrets and profile choices. +4. Commit the customer files. +5. Sync `countly-bootstrap`. +6. Argo CD creates the per-customer apps automatically. -For the initial rollout, ArgoCD is scoped to one customer metadata file: +## Folder Overview -- `argocd/customers/helm-argocd.yaml` +- `root-application.yaml` + - The parent Argo CD application. + - Sync this when you want Argo CD to pick up Git changes in `argocd/`. +- `projects/customers.yaml` + - Shared Argo CD project for customer apps. 
+- `operators/` + - Per-customer platform apps such as cert-manager, ingress, MongoDB operator, ClickHouse operator, and Strimzi. +- `applicationsets/` + - Generates one Argo CD `Application` per component per customer. +- `customers/` + - One small metadata file per customer. +- `../environments//` + - Helm values and secrets for that customer. -## Before You Sync +## What Gets Created For Each Customer -1. Install ArgoCD and the ApplicationSet controller. -2. Register each target cluster in ArgoCD. -3. Create or update one customer metadata file in: - - `argocd/customers/.yaml` - - The `server:` value must match the cluster entry registered in ArgoCD. - - Set `hostname:` to the customer domain. -4. Replace the environment hostname in: - - `environments//global.yaml` -5. Populate the direct values in the customer `secrets-*.yaml` files before the first deploy. -6. Configure ArgoCD custom health checks for MongoDB, ClickHouse, and Strimzi CRs. +Core apps: +- MongoDB +- ClickHouse +- Kafka +- Countly + +Optional apps: +- Observability +- Migration + +Platform apps: +- cert-manager +- MongoDB CRDs/operator +- ClickHouse operator +- Strimzi Kafka operator +- NGINX ingress +- Let’s Encrypt issuer + +## Before You Start + +Make sure these are already true: -## Apply Order +1. Argo CD is installed in the tools cluster. +2. `countly-bootstrap` exists and is healthy. +3. The target customer cluster is registered in Argo CD. +4. DNS for the customer hostname points to the ingress load balancer you expect to use. + +Helpful checks: ```bash -kubectl apply -f argocd/projects/customers.yaml -n argocd -kubectl apply -f argocd/applicationsets/ -n argocd +argocd app list +argocd cluster list ``` -Or bootstrap everything with one parent app: +## Add A New Customer + +### 1. 
Create the customer scaffold + +Run: ```bash -kubectl apply -f argocd/root-application.yaml -n argocd +./scripts/new-argocd-customer.sh ``` -## Generated Application Order +Example: -- Wave `-30` to `-24`: per-customer cert-manager, MongoDB CRDs/operator, ClickHouse operator, Strimzi operator, NGINX ingress, Let’s Encrypt ClusterIssuer -- Wave `0`: MongoDB, ClickHouse -- Wave `5`: Kafka -- Wave `10`: Countly -- Wave `15`: Observability - -## Add A New Customer Later +```bash +./scripts/new-argocd-customer.sh acme https://1.2.3.4 acme.count.ly +``` -1. Run: - ```bash - ./scripts/new-argocd-customer.sh - ``` -2. Fill in `environments//secrets-*.yaml`. -3. Adjust any customer-specific overrides in `environments//*.yaml`. -4. Commit and let ArgoCD reconcile. +This creates: -Only two Git-managed inputs are required per new customer: +- `argocd/customers/.yaml` - `environments//` + +### 2. Edit the customer metadata + +File: + - `argocd/customers/.yaml` -Customer metadata is the source of truth for: +This file is the source of truth for: + - `server` - `hostname` - `sizing` @@ -73,5 +101,167 @@ Customer metadata is the source of truth for: - `kafkaConnect` - `migration` -Do not set `ingress.hostname` or `ingress.tls.mode` in `environments//countly.yaml`. -Those are driven from the customer metadata file and passed explicitly by the Countly ApplicationSet. +Typical example: + +```yaml +customer: acme +environment: acme +project: countly-customers +server: https://1.2.3.4 +hostname: acme.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled +``` + +### 3. 
Fill in the customer secrets + +Files to review: + +- `environments//secrets-countly.yaml` +- `environments//secrets-clickhouse.yaml` +- `environments//secrets-kafka.yaml` +- `environments//secrets-mongodb.yaml` +- `environments//secrets-observability.yaml` +- `environments//secrets-migration.yaml` + +For direct-value deployments: + +- set `secrets.mode: values` where used +- fill in the real passwords and secrets +- keep matching passwords consistent across Countly, ClickHouse, Kafka, and MongoDB + +For external secret deployments: + +- use your external secret setup instead of committing direct values + +## Important Rules + +### Customer metadata wins + +The customer file in `argocd/customers/` is the source of truth for: + +- cluster destination +- domain +- sizing +- TLS mode +- observability mode +- migration mode + +### Do not set these in `environments//countly.yaml` + +Do not manually set: + +- `ingress.hostname` +- `ingress.tls.mode` + +These are passed from customer metadata by the Countly `ApplicationSet`. + +### Kafka when migration is disabled + +If `migration: disabled`, make sure the drill ClickHouse sink connector is not enabled in: + +- `environments//kafka.yaml` + +This avoids creating a Kafka connector that depends on migration-owned tables. + +## Commit And Deploy + +After the customer files are ready: + +```bash +git add argocd/customers/.yaml environments/ +git commit -m "Add customer" +git push origin +``` + +Then tell Argo CD to pick it up: + +```bash +argocd app get countly-bootstrap --refresh +argocd app sync countly-bootstrap +kubectl get applications -n argocd | grep +``` + +## Expected App Order + +The apps are designed to settle roughly in this order: + +1. Platform operators and ingress +2. MongoDB and ClickHouse +3. Kafka +4. Countly +5. Observability +6. Migration + +It is normal for some apps to show `Progressing` for a while during first rollout. 
+ +## Quick Verification + +After sync, useful checks are: + +```bash +kubectl get applications -n argocd | grep +kubectl get pods -A +kubectl get ingress -n countly +kubectl get certificate -n countly +curl -Ik https:// +``` + +## Removing A Customer + +1. Delete: + - `argocd/customers/.yaml` + - `environments//` +2. Commit and push. +3. Sync `countly-bootstrap`. +4. Confirm the customer apps disappear from Argo CD. + +## Common Problems + +### Countly still renders `countly.example.com` + +Cause: +- stale customer env overrides, or the `countly-app` `ApplicationSet` has not refreshed yet + +Fix: +- sync `countly-bootstrap` +- make sure the generated Countly app includes `ingress.hostname` and `ingress.tls.mode` + +### Kafka fails because of the drill sink connector + +Cause: +- migration is disabled, but the connector is still enabled + +Fix: +- disable `ch-sink-drill-events` in `environments//kafka.yaml` + +### Bootstrap changes are not reaching generated apps + +Cause: +- `countly-bootstrap` was not refreshed or synced + +Fix: + +```bash +argocd app get countly-bootstrap --refresh +argocd app sync countly-bootstrap +``` + +## Recommended Workflow For Engineers + +For each new customer: + +1. Register the cluster in Argo CD. +2. Run the scaffold script. +3. Edit `argocd/customers/.yaml`. +4. Fill in `environments//secrets-*.yaml`. +5. Review `environments//kafka.yaml` if migration is disabled. +6. Commit and push. +7. Sync `countly-bootstrap`. +8. Verify the generated apps, ingress, and certificate. + +If you follow that flow, you should not need to manually create Argo CD apps one by one. 
From 47aab686088e02c52b93f283c69d3c913baacede Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 21:02:22 +0530 Subject: [PATCH 31/79] Add GAR image pull support for Argo customer deployments --- .gitignore | 3 + README.md | 6 + argocd/README.md | 13 + argocd/applicationsets/00-mongodb.yaml | 4 +- argocd/applicationsets/01-clickhouse.yaml | 4 +- argocd/applicationsets/02-kafka.yaml | 4 +- argocd/applicationsets/03-countly.yaml | 4 +- argocd/applicationsets/04-observability.yaml | 4 +- argocd/applicationsets/05-migration.yaml | 4 +- argocd/customers/gcr-argo.yaml | 16 + argocd/operators/00-cert-manager.yaml | 2 +- argocd/operators/01-mongodb-crds.yaml | 2 +- argocd/operators/02-mongodb-operator.yaml | 2 +- argocd/operators/03-clickhouse-operator.yaml | 2 +- argocd/operators/04-strimzi-operator.yaml | 2 +- argocd/operators/05-nginx-ingress.yaml | 4 +- .../06-letsencrypt-prod-issuer-app.yaml | 4 +- argocd/operators/08-cluster-secret-store.yaml | 48 ++ argocd/root-application.yaml | 2 +- .../countly-cluster-secret-store/Chart.yaml | 6 + .../templates/clustersecretstore.yaml | 16 + .../countly-cluster-secret-store/values.yaml | 9 + charts/countly-kafka/templates/_helpers.tpl | 23 + .../templates/external-secret-image-pull.yaml | 26 + .../countly-kafka/templates/kafkaconnect.yaml | 6 +- charts/countly-kafka/values.schema.json | 48 +- charts/countly-kafka/values.yaml | 13 + .../countly/templates/_countly-component.tpl | 19 +- charts/countly/templates/_helpers.tpl | 10 + .../templates/external-secret-image-pull.yaml | 26 + charts/countly/values.schema.json | 55 ++ charts/countly/values.yaml | 15 +- environments/example-production/global.yaml | 14 + environments/gcr-argo/README.md | 77 +++ environments/gcr-argo/clickhouse.yaml | 203 +++++++ .../cluster-secret-store.gcp.example.yaml | 31 + environments/gcr-argo/countly-tls.env | 7 + environments/gcr-argo/countly.yaml | 571 ++++++++++++++++++ .../gcr-argo/external-secrets.example.yaml | 50 ++ 
environments/gcr-argo/global.yaml | 39 ++ .../gcr-argo/image-pull-secrets.example.yaml | 41 ++ environments/gcr-argo/kafka.yaml | 341 +++++++++++ environments/gcr-argo/migration.yaml | 3 + environments/gcr-argo/mongodb.yaml | 144 +++++ environments/gcr-argo/observability.yaml | 437 ++++++++++++++ environments/gcr-argo/secrets-clickhouse.yaml | 4 + environments/gcr-argo/secrets-countly.yaml | 16 + environments/gcr-argo/secrets-kafka.yaml | 4 + environments/gcr-argo/secrets-migration.yaml | 2 + environments/gcr-argo/secrets-mongodb.yaml | 7 + .../gcr-argo/secrets-observability.yaml | 2 + environments/gcr-argo/secrets.example.yaml | 42 ++ .../gcr-argo/secrets.sops.example.yaml | 21 + environments/local/kafka.yaml | 1 + environments/reference/README.md | 30 +- .../cluster-secret-store.gcp.example.yaml | 31 + environments/reference/countly.yaml | 3 +- .../reference/external-secrets.example.yaml | 14 + environments/reference/global.yaml | 12 + .../reference/image-pull-secrets.example.yaml | 41 ++ environments/reference/kafka.yaml | 5 + scripts/new-argocd-customer.sh | 23 +- 62 files changed, 2578 insertions(+), 40 deletions(-) create mode 100644 argocd/customers/gcr-argo.yaml create mode 100644 argocd/operators/08-cluster-secret-store.yaml create mode 100644 charts/countly-cluster-secret-store/Chart.yaml create mode 100644 charts/countly-cluster-secret-store/templates/clustersecretstore.yaml create mode 100644 charts/countly-cluster-secret-store/values.yaml create mode 100644 charts/countly-kafka/templates/external-secret-image-pull.yaml create mode 100644 charts/countly/templates/external-secret-image-pull.yaml create mode 100644 environments/gcr-argo/README.md create mode 100644 environments/gcr-argo/clickhouse.yaml create mode 100644 environments/gcr-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/gcr-argo/countly-tls.env create mode 100644 environments/gcr-argo/countly.yaml create mode 100644 
environments/gcr-argo/external-secrets.example.yaml create mode 100644 environments/gcr-argo/global.yaml create mode 100644 environments/gcr-argo/image-pull-secrets.example.yaml create mode 100644 environments/gcr-argo/kafka.yaml create mode 100644 environments/gcr-argo/migration.yaml create mode 100644 environments/gcr-argo/mongodb.yaml create mode 100644 environments/gcr-argo/observability.yaml create mode 100644 environments/gcr-argo/secrets-clickhouse.yaml create mode 100644 environments/gcr-argo/secrets-countly.yaml create mode 100644 environments/gcr-argo/secrets-kafka.yaml create mode 100644 environments/gcr-argo/secrets-migration.yaml create mode 100644 environments/gcr-argo/secrets-mongodb.yaml create mode 100644 environments/gcr-argo/secrets-observability.yaml create mode 100644 environments/gcr-argo/secrets.example.yaml create mode 100644 environments/gcr-argo/secrets.sops.example.yaml create mode 100644 environments/reference/cluster-secret-store.gcp.example.yaml create mode 100644 environments/reference/image-pull-secrets.example.yaml diff --git a/.gitignore b/.gitignore index 8809778..1406193 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,9 @@ secrets-*.yaml # Exception: reference environment templates (contain no real secrets) !environments/reference/secrets-*.yaml +# Temporary test customer exception: allow gcr-argo secrets to be committed +!environments/gcr-argo/secrets-*.yaml + # Helmfile state helmfile.lock .helmfile/ diff --git a/README.md b/README.md index 2926655..84e64ca 100644 --- a/README.md +++ b/README.md @@ -156,6 +156,8 @@ Install required operators before deploying Countly. 
See [docs/PREREQUISITES.md] - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - Choose `global.security`: `open` or `hardened` + - Keep `global.imageSource.mode: direct` for the current direct-pull flow, or switch to `gcpArtifactRegistry` and set `global.imageSource.gcpArtifactRegistry.repositoryPrefix` + - Set `global.imagePullSecrets` when pulling from a private registry such as GAR 3. **Fill in required secrets** in the chart-specific files. See `environments/reference/secrets.example.yaml` for a complete reference. @@ -172,6 +174,10 @@ Install required operators before deploying Countly. See [docs/PREREQUISITES.md] helmfile -e my-deployment apply ``` +For a GAR-backed production example, see [environments/example-production/global.yaml](/Users/admin/cly/helm/environments/example-production/global.yaml) and replace `countly-gar` with your Kubernetes docker-registry secret name. +For GitOps-managed pull secrets, start from [environments/reference/image-pull-secrets.example.yaml](/Users/admin/cly/helm/environments/reference/image-pull-secrets.example.yaml) and encrypt or template it before committing. +For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly and Kafka Connect each create their own namespaced `dockerconfigjson` pull secret. 
+ ### GitOps Customer Onboarding For Argo CD managed deployments, scaffold a new customer/cluster with: diff --git a/argocd/README.md b/argocd/README.md index 31eacea..924d575 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -46,6 +46,7 @@ Platform apps: - Strimzi Kafka operator - NGINX ingress - Let’s Encrypt issuer +- ClusterSecretStore for Google Secret Manager ## Before You Start @@ -93,6 +94,11 @@ File: This file is the source of truth for: - `server` +- `gcpServiceAccountEmail` +- `secretManagerProjectID` +- `clusterProjectID` +- `clusterName` +- `clusterLocation` - `hostname` - `sizing` - `security` @@ -108,6 +114,11 @@ customer: acme environment: acme project: countly-customers server: https://1.2.3.4 +gcpServiceAccountEmail: eso-acme@my-project.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: acme-prod +clusterLocation: us-central1 hostname: acme.count.ly sizing: tier1 security: open @@ -137,6 +148,8 @@ For direct-value deployments: For external secret deployments: - use your external secret setup instead of committing direct values +- set `gcpServiceAccountEmail` in the customer metadata so the per-customer External Secrets operator can use Workload Identity +- for GAR image pulls, store Docker config JSON in Google Secret Manager and point `global.imagePullSecretExternalSecret.remoteRef.key` to that secret ## Important Rules diff --git a/argocd/applicationsets/00-mongodb.yaml b/argocd/applicationsets/00-mongodb.yaml index a842718..c780cf1 100644 --- a/argocd/applicationsets/00-mongodb.yaml +++ b/argocd/applicationsets/00-mongodb.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + 
targetRevision: gcp-artifact-rep-integration path: charts/countly-mongodb helm: releaseName: countly-mongodb diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml index 4b3ae40..2126c0c 100644 --- a/argocd/applicationsets/01-clickhouse.yaml +++ b/argocd/applicationsets/01-clickhouse.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + targetRevision: gcp-artifact-rep-integration path: charts/countly-clickhouse helm: releaseName: countly-clickhouse diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index dbf3691..9b2d6d4 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + targetRevision: gcp-artifact-rep-integration path: charts/countly-kafka helm: releaseName: countly-kafka diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index 328fbad..bf6b728 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui 
+ targetRevision: gcp-artifact-rep-integration path: charts/countly helm: releaseName: countly diff --git a/argocd/applicationsets/04-observability.yaml b/argocd/applicationsets/04-observability.yaml index 4a9eca7..4e7deff 100644 --- a/argocd/applicationsets/04-observability.yaml +++ b/argocd/applicationsets/04-observability.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + targetRevision: gcp-artifact-rep-integration path: '{{ if eq .observability "disabled" }}charts/noop{{ else }}charts/countly-observability{{ end }}' helm: releaseName: countly-observability diff --git a/argocd/applicationsets/05-migration.yaml b/argocd/applicationsets/05-migration.yaml index 9b97e45..59adc7c 100644 --- a/argocd/applicationsets/05-migration.yaml +++ b/argocd/applicationsets/05-migration.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + targetRevision: gcp-artifact-rep-integration path: '{{ if eq .migration "enabled" }}charts/countly-migration{{ else }}charts/noop{{ end }}' helm: releaseName: countly-migration diff --git a/argocd/customers/gcr-argo.yaml b/argocd/customers/gcr-argo.yaml new file mode 100644 index 0000000..c662d03 --- /dev/null +++ b/argocd/customers/gcr-argo.yaml @@ -0,0 +1,16 @@ +customer: gcr-argo +environment: gcr-argo +project: countly-customers +server: https://34.60.231.37 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: 
countly-tools +clusterProjectID: countly-dev-313620 +clusterName: gcr-argo +clusterLocation: us-central1-a +hostname: gcr-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/argocd/operators/00-cert-manager.yaml b/argocd/operators/00-cert-manager.yaml index 93b6502..173a215 100644 --- a/argocd/operators/00-cert-manager.yaml +++ b/argocd/operators/00-cert-manager.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/01-mongodb-crds.yaml b/argocd/operators/01-mongodb-crds.yaml index 01aff45..71ae1de 100644 --- a/argocd/operators/01-mongodb-crds.yaml +++ b/argocd/operators/01-mongodb-crds.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/02-mongodb-operator.yaml b/argocd/operators/02-mongodb-operator.yaml index 730ee95..4e3ae6b 100644 --- a/argocd/operators/02-mongodb-operator.yaml +++ b/argocd/operators/02-mongodb-operator.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/03-clickhouse-operator.yaml b/argocd/operators/03-clickhouse-operator.yaml index 099b9cc..6a289c0 100644 --- a/argocd/operators/03-clickhouse-operator.yaml +++ b/argocd/operators/03-clickhouse-operator.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: diff --git 
a/argocd/operators/04-strimzi-operator.yaml b/argocd/operators/04-strimzi-operator.yaml index 58b3805..a49f917 100644 --- a/argocd/operators/04-strimzi-operator.yaml +++ b/argocd/operators/04-strimzi-operator.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/05-nginx-ingress.yaml b/argocd/operators/05-nginx-ingress.yaml index 3510b6e..2b43ab0 100644 --- a/argocd/operators/05-nginx-ingress.yaml +++ b/argocd/operators/05-nginx-ingress.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -29,7 +29,7 @@ spec: valueFiles: - $values/nginx-ingress-values.yaml - repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + targetRevision: gcp-artifact-rep-integration ref: values destination: server: "{{ .server }}" diff --git a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml index 9a10404..19eb7f0 100644 --- a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml +++ b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: deploy-test-ui + revision: gcp-artifact-rep-integration files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: default source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + targetRevision: gcp-artifact-rep-integration path: argocd/operator-manifests/letsencrypt-prod-issuer directory: recurse: true diff --git a/argocd/operators/08-cluster-secret-store.yaml b/argocd/operators/08-cluster-secret-store.yaml new file mode 100644 index 0000000..cdfeff4 --- /dev/null +++ 
b/argocd/operators/08-cluster-secret-store.yaml @@ -0,0 +1,48 @@ +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: customer-cluster-secret-store + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: gcp-artifact-rep-integration + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-cluster-secret-store" + annotations: + argocd.argoproj.io/sync-wave: "-22" + spec: + project: default + source: + repoURL: https://github.com/Countly/helm.git + targetRevision: gcp-artifact-rep-integration + path: charts/countly-cluster-secret-store + helm: + releaseName: countly-cluster-secret-store + parameters: + - name: secretStore.name + value: "gcp-secrets" + - name: secretStore.secretManagerProjectID + value: "{{ .secretManagerProjectID }}" + - name: secretStore.clusterProjectID + value: "{{ .clusterProjectID }}" + - name: secretStore.clusterName + value: "{{ .clusterName }}" + - name: secretStore.clusterLocation + value: "{{ .clusterLocation }}" + destination: + server: "{{ .server }}" + namespace: external-secrets + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - ServerSideApply=true diff --git a/argocd/root-application.yaml b/argocd/root-application.yaml index 6e6e80a..2740352 100644 --- a/argocd/root-application.yaml +++ b/argocd/root-application.yaml @@ -7,7 +7,7 @@ spec: project: default source: repoURL: https://github.com/Countly/helm.git - targetRevision: deploy-test-ui + targetRevision: gcp-artifact-rep-integration path: argocd directory: recurse: true diff --git a/charts/countly-cluster-secret-store/Chart.yaml b/charts/countly-cluster-secret-store/Chart.yaml new file mode 100644 index 0000000..219c646 --- /dev/null +++ b/charts/countly-cluster-secret-store/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: countly-cluster-secret-store +description: ClusterSecretStore for 
External Secrets Operator with GCP Secret Manager +type: application +version: 0.1.0 +appVersion: "1.0" diff --git a/charts/countly-cluster-secret-store/templates/clustersecretstore.yaml b/charts/countly-cluster-secret-store/templates/clustersecretstore.yaml new file mode 100644 index 0000000..36f75dd --- /dev/null +++ b/charts/countly-cluster-secret-store/templates/clustersecretstore.yaml @@ -0,0 +1,16 @@ +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: {{ .Values.secretStore.name }} +spec: + provider: + gcpsm: + projectID: {{ required "secretStore.secretManagerProjectID is required" .Values.secretStore.secretManagerProjectID | quote }} + auth: + workloadIdentity: + clusterProjectID: {{ required "secretStore.clusterProjectID is required" .Values.secretStore.clusterProjectID | quote }} + clusterName: {{ required "secretStore.clusterName is required" .Values.secretStore.clusterName | quote }} + clusterLocation: {{ required "secretStore.clusterLocation is required" .Values.secretStore.clusterLocation | quote }} + serviceAccountRef: + name: {{ .Values.secretStore.serviceAccountRef.name | quote }} + namespace: {{ .Values.secretStore.serviceAccountRef.namespace | quote }} diff --git a/charts/countly-cluster-secret-store/values.yaml b/charts/countly-cluster-secret-store/values.yaml new file mode 100644 index 0000000..7b72520 --- /dev/null +++ b/charts/countly-cluster-secret-store/values.yaml @@ -0,0 +1,9 @@ +secretStore: + name: gcp-secrets + secretManagerProjectID: "" + clusterProjectID: "" + clusterName: "" + clusterLocation: "" + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/charts/countly-kafka/templates/_helpers.tpl b/charts/countly-kafka/templates/_helpers.tpl index f51a958..2bd3fc1 100644 --- a/charts/countly-kafka/templates/_helpers.tpl +++ b/charts/countly-kafka/templates/_helpers.tpl @@ -97,3 +97,26 @@ ClickHouse Connect secret name {{ .Values.kafkaConnect.clickhouse.secretName }} {{- 
end -}} {{- end -}} + +{{/* +Resolve the Kafka Connect image based on the selected source mode. +*/}} +{{- define "countly-kafka.connectImage" -}} +{{- $mode := .Values.global.imageSource.mode | default "direct" -}} +{{- if eq $mode "gcpArtifactRegistry" -}} +{{- $prefix := required "global.imageSource.gcpArtifactRegistry.repositoryPrefix is required when global.imageSource.mode is gcpArtifactRegistry" .Values.global.imageSource.gcpArtifactRegistry.repositoryPrefix -}} +{{- printf "%s/%s" ($prefix | trimSuffix "/") .Values.kafkaConnect.artifactImage -}} +{{- else -}} +{{- .Values.kafkaConnect.image -}} +{{- end -}} +{{- end -}} + +{{/* +Resolve the first configured imagePullSecret name. +*/}} +{{- define "countly-kafka.imagePullSecretName" -}} +{{- $pullSecrets := .Values.global.imagePullSecrets | default list -}} +{{- if gt (len $pullSecrets) 0 -}} +{{- (index $pullSecrets 0).name -}} +{{- end -}} +{{- end -}} diff --git a/charts/countly-kafka/templates/external-secret-image-pull.yaml b/charts/countly-kafka/templates/external-secret-image-pull.yaml new file mode 100644 index 0000000..4cdcebf --- /dev/null +++ b/charts/countly-kafka/templates/external-secret-image-pull.yaml @@ -0,0 +1,26 @@ +{{- if .Values.global.imagePullSecretExternalSecret.enabled }} +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly-kafka.imagePullSecretName" .) }} + labels: + {{- include "countly-kafka.labels" . | nindent 4 }} + {{- if .Values.argocd.enabled }} + annotations: + {{- include "countly-kafka.syncWave" (dict "wave" "0" "root" .) 
| nindent 4 }} + {{- end }} +spec: + refreshInterval: {{ .Values.global.imagePullSecretExternalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "global.imagePullSecretExternalSecret.secretStoreRef.name is required when global.imagePullSecretExternalSecret.enabled is true" .Values.global.imagePullSecretExternalSecret.secretStoreRef.name }} + kind: {{ .Values.global.imagePullSecretExternalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly-kafka.imagePullSecretName" .) }} + creationPolicy: Owner + template: + type: kubernetes.io/dockerconfigjson + data: + - secretKey: .dockerconfigjson + remoteRef: + key: {{ required "global.imagePullSecretExternalSecret.remoteRef.key is required when global.imagePullSecretExternalSecret.enabled is true" .Values.global.imagePullSecretExternalSecret.remoteRef.key }} +{{- end }} diff --git a/charts/countly-kafka/templates/kafkaconnect.yaml b/charts/countly-kafka/templates/kafkaconnect.yaml index fc6994c..0485288 100644 --- a/charts/countly-kafka/templates/kafkaconnect.yaml +++ b/charts/countly-kafka/templates/kafkaconnect.yaml @@ -20,7 +20,7 @@ spec: name: {{ include "countly-kafka.fullname" . }}-metrics key: connect-metrics-config.yml {{- end }} - image: {{ .Values.kafkaConnect.image }} + image: {{ include "countly-kafka.connectImage" . | quote }} groupId: {{ .Values.kafkaConnect.workerConfig | dig "group.id" "connect-cluster" }} offsetStorageTopic: {{ .Values.kafkaConnect.workerConfig | dig "offset.storage.topic" "connect-offsets" }} configStorageTopic: {{ .Values.kafkaConnect.workerConfig | dig "config.storage.topic" "connect-configs" }} @@ -36,6 +36,10 @@ spec: {{- end }} template: pod: + {{- with .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} {{- $scheduling := .Values.kafkaConnect.scheduling | default dict }} {{- $antiAffinity := $scheduling.antiAffinity | default dict }} {{- if (and (hasKey $antiAffinity "enabled") $antiAffinity.enabled) }} diff --git a/charts/countly-kafka/values.schema.json b/charts/countly-kafka/values.schema.json index d282159..1e7d7da 100644 --- a/charts/countly-kafka/values.schema.json +++ b/charts/countly-kafka/values.schema.json @@ -3,7 +3,48 @@ "type": "object", "required": ["version"], "properties": { - "global": { "type": "object" }, + "global": { + "type": "object", + "properties": { + "imageRegistry": { "type": "string" }, + "imagePullSecrets": { "type": "array" }, + "imagePullSecretExternalSecret": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "refreshInterval": { "type": "string" }, + "secretStoreRef": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "kind": { "type": "string" } + } + }, + "remoteRef": { + "type": "object", + "properties": { + "key": { "type": "string" } + } + } + } + }, + "imageSource": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": ["direct", "gcpArtifactRegistry"] + }, + "gcpArtifactRegistry": { + "type": "object", + "properties": { + "repositoryPrefix": { "type": "string" } + } + } + } + } + } + }, "createNamespace": { "type": "boolean" }, "strimzi": { "type": "object", @@ -75,6 +116,11 @@ "enabled": { "type": "boolean" }, "name": { "type": "string" }, "image": { "type": "string", "minLength": 1 }, + "artifactImage": { + "type": "string", + "minLength": 1, + "description": "Image name appended to global.imageSource.gcpArtifactRegistry.repositoryPrefix when using GCP Artifact Registry" + }, "replicas": { "type": "integer", "minimum": 1 }, "bootstrapServers": { "type": "string" }, "resources": { "type": "object" }, diff --git a/charts/countly-kafka/values.yaml b/charts/countly-kafka/values.yaml index 974a6d6..b7d6eb0 100644 --- 
a/charts/countly-kafka/values.yaml +++ b/charts/countly-kafka/values.yaml @@ -1,5 +1,17 @@ global: imageRegistry: "" + imageSource: + mode: direct + gcpArtifactRegistry: + repositoryPrefix: "" + imagePullSecretExternalSecret: + enabled: false + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRef: + key: "" imagePullSecrets: [] storageClass: "" sizing: small @@ -104,6 +116,7 @@ kafkaConnect: enabled: true name: connect-ch image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" replicas: 2 bootstrapServers: "" resources: diff --git a/charts/countly/templates/_countly-component.tpl b/charts/countly/templates/_countly-component.tpl index 7ec4119..1a7df21 100644 --- a/charts/countly/templates/_countly-component.tpl +++ b/charts/countly/templates/_countly-component.tpl @@ -60,7 +60,7 @@ spec: type: RuntimeDefault containers: - name: {{ $component }} - image: "{{ if $root.Values.global.imageRegistry }}{{ $root.Values.global.imageRegistry }}/{{ end }}{{ $root.Values.image.repository }}{{ if $root.Values.image.digest }}@{{ $root.Values.image.digest }}{{ else }}:{{ $root.Values.image.tag | default $root.Chart.AppVersion }}{{ end }}" + image: "{{ include "countly.image" $root }}{{ if $root.Values.image.digest }}@{{ $root.Values.image.digest }}{{ else }}:{{ $root.Values.image.tag | default $root.Chart.AppVersion }}{{ end }}" imagePullPolicy: {{ $root.Values.image.pullPolicy }} securityContext: runAsNonRoot: true @@ -173,6 +173,23 @@ spec: {{- end }} {{- end -}} +{{/* +Resolve the Countly image repository based on the selected source mode. 
+*/}} +{{- define "countly.image" -}} +{{- $mode := .Values.global.imageSource.mode | default "direct" -}} +{{- if eq $mode "gcpArtifactRegistry" -}} +{{- $prefix := required "global.imageSource.gcpArtifactRegistry.repositoryPrefix is required when global.imageSource.mode is gcpArtifactRegistry" .Values.global.imageSource.gcpArtifactRegistry.repositoryPrefix -}} +{{- printf "%s/%s" ($prefix | trimSuffix "/") .Values.image.artifactRepository -}} +{{- else -}} +{{- if .Values.global.imageRegistry -}} +{{- printf "%s/%s" (.Values.global.imageRegistry | trimSuffix "/") .Values.image.repository -}} +{{- else -}} +{{- .Values.image.repository -}} +{{- end -}} +{{- end -}} +{{- end -}} + {{/* Service for a Countly component */}} diff --git a/charts/countly/templates/_helpers.tpl b/charts/countly/templates/_helpers.tpl index 639492b..341668a 100644 --- a/charts/countly/templates/_helpers.tpl +++ b/charts/countly/templates/_helpers.tpl @@ -193,3 +193,13 @@ ArgoCD sync-wave annotation (only when argocd.enabled). argocd.argoproj.io/sync-wave: {{ .wave | quote }} {{- end }} {{- end -}} + +{{/* +Resolve the first configured imagePullSecret name. +*/}} +{{- define "countly.imagePullSecretName" -}} +{{- $pullSecrets := .Values.global.imagePullSecrets | default list -}} +{{- if gt (len $pullSecrets) 0 -}} +{{- (index $pullSecrets 0).name -}} +{{- end -}} +{{- end -}} diff --git a/charts/countly/templates/external-secret-image-pull.yaml b/charts/countly/templates/external-secret-image-pull.yaml new file mode 100644 index 0000000..8f64b39 --- /dev/null +++ b/charts/countly/templates/external-secret-image-pull.yaml @@ -0,0 +1,26 @@ +{{- if .Values.global.imagePullSecretExternalSecret.enabled }} +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly.imagePullSecretName" .) }} + labels: + {{- include "countly.labels" . 
| nindent 4 }} + {{- if .Values.argocd.enabled }} + annotations: + {{- include "countly.syncWave" (dict "wave" "0" "root" .) | nindent 4 }} + {{- end }} +spec: + refreshInterval: {{ .Values.global.imagePullSecretExternalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "global.imagePullSecretExternalSecret.secretStoreRef.name is required when global.imagePullSecretExternalSecret.enabled is true" .Values.global.imagePullSecretExternalSecret.secretStoreRef.name }} + kind: {{ .Values.global.imagePullSecretExternalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly.imagePullSecretName" .) }} + creationPolicy: Owner + template: + type: kubernetes.io/dockerconfigjson + data: + - secretKey: .dockerconfigjson + remoteRef: + key: {{ required "global.imagePullSecretExternalSecret.remoteRef.key is required when global.imagePullSecretExternalSecret.enabled is true" .Values.global.imagePullSecretExternalSecret.remoteRef.key }} +{{- end }} diff --git a/charts/countly/values.schema.json b/charts/countly/values.schema.json index 56d4381..31a0813 100644 --- a/charts/countly/values.schema.json +++ b/charts/countly/values.schema.json @@ -11,6 +11,56 @@ "imageRegistry": { "type": "string" }, + "imageSource": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": [ + "direct", + "gcpArtifactRegistry" + ] + }, + "gcpArtifactRegistry": { + "type": "object", + "properties": { + "repositoryPrefix": { + "type": "string" + } + } + } + } + }, + "imagePullSecretExternalSecret": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "refreshInterval": { + "type": "string" + }, + "secretStoreRef": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "kind": { + "type": "string" + } + } + }, + "remoteRef": { + "type": "object", + 
"properties": { + "key": { + "type": "string" + } + } + } + } + }, "imagePullSecrets": { "type": "array" }, @@ -41,6 +91,11 @@ "type": "string", "minLength": 1 }, + "artifactRepository": { + "type": "string", + "minLength": 1, + "description": "Repository path appended to global.imageSource.gcpArtifactRegistry.repositoryPrefix when using GCP Artifact Registry" + }, "digest": { "type": [ "string", diff --git a/charts/countly/values.yaml b/charts/countly/values.yaml index 28d06b0..203ccc1 100644 --- a/charts/countly/values.yaml +++ b/charts/countly/values.yaml @@ -1,5 +1,17 @@ global: imageRegistry: "" + imageSource: + mode: direct + gcpArtifactRegistry: + repositoryPrefix: "" + imagePullSecretExternalSecret: + enabled: false + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRef: + key: "" imagePullSecrets: [] storageClass: "" sizing: small @@ -22,7 +34,8 @@ serviceAccount: image: repository: gcr.io/countly-dev-313620/countly-unified - digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" + artifactRepository: countly-unified + digest: "sha256:b42efb9713ee11d173fe409924fb9e2a208b5c0beafed9e42f349b996b6650a4" tag: "26.01" pullPolicy: IfNotPresent diff --git a/environments/example-production/global.yaml b/environments/example-production/global.yaml index ea3c15b..500e3e9 100644 --- a/environments/example-production/global.yaml +++ b/environments/example-production/global.yaml @@ -6,6 +6,20 @@ global: tls: letsencrypt observability: full kafkaConnect: balanced + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRef: + key: customer-a-gar-dockerconfig + imagePullSecrets: + - name: countly-gar storageClass: gp3 ingress: diff --git a/environments/gcr-argo/README.md 
b/environments/gcr-argo/README.md new file mode 100644 index 0000000..a2a0335 --- /dev/null +++ b/environments/gcr-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. Fill in required secrets in the chart-specific files: + - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `clickhouse.yaml` → `auth.defaultUserPassword.password` + - `kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `secrets-mongodb.yaml` | MongoDB user passwords | +| `secrets-clickhouse.yaml` | ClickHouse auth password | +| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | +| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload 
Identity | diff --git a/environments/gcr-argo/clickhouse.yaml b/environments/gcr-argo/clickhouse.yaml new file mode 100644 index 0000000..d7899d3 --- /dev/null +++ b/environments/gcr-argo/clickhouse.yaml @@ -0,0 +1,203 @@ +# ============================================================================= +# ClickHouse Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-clickhouse/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Operator API Version --- +clickhouseOperator: + apiVersion: clickhouse.com/v1alpha1 + +# ============================================================================= +# Cluster Topology +# ============================================================================= +version: "26.2" +shards: 1 +replicas: 2 + +# ============================================================================= +# Images +# ============================================================================= +image: + server: clickhouse/clickhouse-server + keeper: clickhouse/clickhouse-keeper + +# ============================================================================= +# Database +# ============================================================================= +database: countly_drill + +# ============================================================================= +# Authentication +# ============================================================================= +auth: + # --- Default User Password --- + 
defaultUserPassword: + existingSecret: "" # Use an existing secret instead of creating one + secretName: clickhouse-default-password + key: password + password: "" # REQUIRED: ClickHouse default user password + + # --- Admin User (optional, separate from default) --- + adminUser: + enabled: false + # Precomputed SHA256 hex of the admin password (64 hex chars). + # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 + passwordSha256Hex: "" + +# ============================================================================= +# OpenTelemetry Server-Side Tracing +# ============================================================================= +# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for +# queries arriving with W3C traceparent headers. +opentelemetry: + enabled: false + spanLog: + ttlDays: 7 + flushIntervalMs: 1000 + +# ============================================================================= +# Server +# ============================================================================= +server: + securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" + size: 50Gi + + settings: + maxConnections: 4096 + extraConfig: "" # Raw XML injected into server config + extraUsersConfig: "" # Raw XML injected into users config + prometheus: + enabled: true + port: 9363 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Keeper (ClickHouse Keeper for replication coordination) +# ============================================================================= +keeper: + replicas: 1 + + 
securityContext: + runAsNonRoot: true + runAsUser: 101 + runAsGroup: 101 + fsGroup: 101 + + resources: + requests: + cpu: "250m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "1Gi" + + persistence: + storageClass: "" + size: 5Gi + + settings: + prometheus: + enabled: true + port: 9090 + endpoint: /metrics + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + +# ============================================================================= +# Pod Disruption Budgets +# ============================================================================= +podDisruptionBudget: + server: + enabled: false + maxUnavailable: 1 + keeper: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + - kafka + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-analytics-namespace + # ports: + # - port: 8123 + # protocol: TCP + +# ============================================================================= +# Service Monitor (Prometheus Operator CRD) +# ============================================================================= +serviceMonitor: + enabled: false + interval: "15s" + serviceType: headless # headless = per-pod scraping, clusterIP = any-pod + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git 
a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..e0493b8 --- /dev/null +++ b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# =============================================================================
+# External Secrets Operator + Google Secret Manager
+# ClusterSecretStore Example
+# =============================================================================
+# Apply this once per cluster after External Secrets Operator is installed.
+#
+# Prerequisites:
+# - The external-secrets controller service account is annotated for Workload
+# Identity with a GCP service account that can read Secret Manager secrets.
+# - The GCP service account has at least:
+# roles/secretmanager.secretAccessor
+#
+# This file is a reference only. Adapt project IDs and names to your cluster.
+# =============================================================================
+
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+ name: gcp-secrets
+spec:
+ provider:
+ gcpsm:
+ projectID: countly-dev-313620
+ auth:
+ workloadIdentity:
+ clusterLocation: us-central1
+ clusterName: change-me
+ clusterProjectID: countly-dev-313620
+ serviceAccountRef:
+ name: external-secrets
+ namespace: external-secrets diff --git a/environments/gcr-argo/countly-tls.env b/environments/gcr-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/gcr-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template
+# Fill in the values below before deploying; never commit real values unencrypted
+
+# Base64 encoded TLS certificate (full chain)
+TLS_CRT=
+# Base64 encoded TLS private key
+TLS_KEY= \ No newline at end of file diff --git a/environments/gcr-argo/countly.yaml b/environments/gcr-argo/countly.yaml new file mode 100644 index 0000000..2df3d8e --- /dev/null +++ 
b/environments/gcr-argo/countly.yaml @@ -0,0 +1,571 @@ +# ============================================================================= +# Countly Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Service Account --- +serviceAccount: + create: true + name: "" # Auto-derived from release name if empty + annotations: {} + +# --- Image --- +image: + repository: gcr.io/countly-dev-313620/countly-unified + artifactRepository: countly-unified + digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" + tag: "26.01" # Fallback when digest is empty + pullPolicy: IfNotPresent + +# --- Cross-Namespace References --- +clickhouseNamespace: clickhouse +kafkaNamespace: kafka +mongodbNamespace: mongodb + +# ============================================================================= +# Component: API +# ============================================================================= +api: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:api"] + port: 3001 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "1" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 6 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: {} + pdb: + 
enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 # 1-100, only used with type=preferred + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Frontend +# ============================================================================= +frontend: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:frontend"] + port: 6001 + healthCheck: + path: /ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "2.5Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + memory: {} + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Ingestor +# ============================================================================= +ingestor: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:ingestor"] + port: 3010 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 120 + terminationGracePeriodSeconds: 120 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 12 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 75 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + 
antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Aggregator +# ============================================================================= +aggregator: + enabled: true + replicaCount: 4 + command: ["npm", "run", "start:aggregator"] + port: 0 # No HTTP port exposed + healthCheck: {} # No HTTP health check (no port) + terminationGracePeriodSeconds: 60 + resources: + requests: + cpu: "1" + memory: "3.5Gi" + limits: + cpu: "2" + memory: "4Gi" + hpa: + enabled: true + minReplicas: 4 + maxReplicas: 8 + metrics: + cpu: + averageUtilization: 65 + memory: + averageUtilization: 65 + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 10 + periodSeconds: 60 + pdb: + enabled: true + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Component: Job Server +# ============================================================================= +jobserver: + enabled: true + replicaCount: 1 + command: ["npm", "run", "start:jobserver"] + port: 3020 + healthCheck: + path: /o/ping + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 5 + terminationGracePeriodSeconds: 30 + resources: + requests: + cpu: "1" + memory: "3Gi" + limits: + cpu: "1" + memory: "3Gi" + hpa: + enabled: true + minReplicas: 1 + maxReplicas: 1 + metrics: + cpu: + averageUtilization: 80 + memory: + averageUtilization: 85 + behavior: {} + pdb: + enabled: false + scheduling: + nodeSelector: {} + 
tolerations: [] + affinity: {} + topologySpreadConstraints: [] + antiAffinity: + enabled: false + extraEnv: [] + extraEnvFrom: [] + +# ============================================================================= +# Configuration (ConfigMaps) +# ============================================================================= +config: + # --- Common (shared by all components) --- + common: + NODE_ENV: production + COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" + COUNTLY_CONFIG__FILESTORAGE: gridfs + COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse + COUNTLY_CONFIG__SHARED_CONNECTION: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' + COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" + COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" + COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' + COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" + + # --- API --- + api: + COUNTLY_CONTAINER: api + COUNTLY_CONFIG__API_PORT: "3001" + COUNTLY_CONFIG__API_HOST: "0.0.0.0" + COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" + COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB + COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms + + # --- Frontend --- + frontend: + COUNTLY_CONTAINER: frontend + COUNTLY_CONFIG__WEB_PORT: "6001" + COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" + COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" + COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms + + # --- 
Ingestor --- + ingestor: + COUNTLY_CONTAINER: ingestor + COUNTLY_CONFIG__INGESTOR_PORT: "3010" + COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" + + # --- Aggregator --- + aggregator: + COUNTLY_CONTAINER: aggregator + UV_THREADPOOL_SIZE: "6" + + # --- Job Server --- + jobserver: + COUNTLY_CONTAINER: jobserver + COUNTLY_CONFIG__JOBSERVER_PORT: "3020" + COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" + + # --- ClickHouse Connection --- + clickhouse: + COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" + COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms + COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" + COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" + COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" + COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" + COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" + COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" + COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: "true" + COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" + COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" + COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" + + # --- Kafka Connection --- + kafka: + COUNTLY_CONFIG__KAFKA_ENABLED: "true" + 
COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events + COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka + COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" + COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" + COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms + COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" + COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app + COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" + COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" + COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" + COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" + COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" + COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" + COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" + COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest + COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" + COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" + COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" + + # --- OpenTelemetry --- + otel: + OTEL_ENABLED: "false" + OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" + OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" + OTEL_TRACES_SAMPLER: "parentbased_traceidratio" + OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample + PYROSCOPE_ENABLED: "false" + +# --- Node.js Options (injected into configmap per component) --- +nodeOptions: + api: "--max-old-space-size=3072 --max-semi-space-size=256" + frontend: "--max-old-space-size=2048" + ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" + aggregator: "--max-old-space-size=3072 --max-semi-space-size=128" + jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" + +# 
============================================================================= +# Backing Service Modes +# ============================================================================= +# When mode=external, the corresponding chart is not deployed and connection +# details below are used instead. + +backingServices: + mongodb: + mode: bundled # bundled | external + host: "" + port: "27017" + connectionString: "" # If set, used as-is (bypasses host/port/user/pass) + username: "app" + password: "" + database: "admin" + replicaSet: "" + existingSecret: "" + # --- External MongoDB Atlas example --- + # mode: external + # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" + + clickhouse: + mode: bundled # bundled | external + host: "" + port: "8123" + tls: "false" + username: "default" + password: "" + database: "countly_drill" + existingSecret: "" + # --- External ClickHouse Cloud example --- + # mode: external + # host: "abc123.us-east-1.aws.clickhouse.cloud" + # port: "8443" + # tls: "true" + + kafka: + mode: bundled # bundled | external + brokers: "" # Comma-separated broker list + securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL + saslMechanism: "" + saslUsername: "" + saslPassword: "" + existingSecret: "" + # --- External Confluent Cloud example --- + # mode: external + # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" + # securityProtocol: "SASL_SSL" + # saslMechanism: "PLAIN" + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + mode: values # values | existingSecret | externalSecret + keep: true # Retain secrets on helm uninstall + rotationId: "" # Change to force secret re-creation + + common: + existingSecret: "" + encryptionReportsKey: "" # REQUIRED: min 8 chars + webSessionSecret: "" # REQUIRED: min 8 chars + passwordSecret: "" # 
REQUIRED: min 8 chars + + clickhouse: + existingSecret: "" + username: "" + password: "" + database: "" + + kafka: + existingSecret: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + + mongodb: + existingSecret: "" + key: "connectionString.standard" # Key within the secret to read + password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) + + # --- ExternalSecret configuration (used only when mode=externalSecret) --- + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: "" + webSessionSecret: "" + passwordSecret: "" + clickhouse: + url: "" + username: "" + password: "" + database: "" + kafka: + brokers: "" + securityProtocol: "" + saslMechanism: "" + saslUsername: "" + saslPassword: "" + mongodb: + connectionString: "" + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + ingressNamespaceSelector: + kubernetes.io/metadata.name: ingress-nginx + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-custom-namespace + # ports: + # - port: 3001 + # protocol: TCP + +# ============================================================================= +# Ingress +# ============================================================================= +ingress: + enabled: true + className: nginx + annotations: + # F5 NGINX Ingress Controller (OSS) annotations + nginx.org/client-max-body-size: "50m" + nginx.org/proxy-buffering: "True" + nginx.org/proxy-buffer-size: "256k" + nginx.org/proxy-buffers: "16 256k" + nginx.org/proxy-busy-buffers-size: "512k" + nginx.org/proxy-max-temp-file-size: "2048m" + nginx.org/client-body-buffer-size: "2m" + nginx.org/proxy-connect-timeout: "60s" + 
nginx.org/proxy-read-timeout: "120s" + nginx.org/proxy-send-timeout: "120s" + nginx.org/keepalive: "256" + nginx.org/server-snippets: | + otel_trace on; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Port $server_port; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_set_header Connection ""; + proxy_set_header X-Request-ID $request_id; + proxy_set_header X-Request-Start $msec; + # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) + client_header_timeout 30s; + nginx.org/location-snippets: | + proxy_request_buffering on; + proxy_next_upstream error timeout http_502 http_503 http_504; + proxy_next_upstream_timeout 30s; + proxy_next_upstream_tries 3; + proxy_temp_file_write_size 1m; + client_body_timeout 120s; + hostname: "" # Set via argocd/customers/.yaml + tls: + # TLS mode: letsencrypt | existingSecret | selfSigned | http + # http: No TLS + # letsencrypt: cert-manager + Let's Encrypt (recommended for production) + # existingSecret: Bring your own TLS secret + # selfSigned: cert-manager self-signed CA (for development) + mode: "" # Set via argocd/customers/.yaml + clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt + secretName: "" # Auto-derived if empty: -tls + selfSigned: + issuerName: "" # Auto-derived if empty: -ca-issuer + caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/gcr-argo/external-secrets.example.yaml b/environments/gcr-argo/external-secrets.example.yaml new file mode 100644 index 0000000..dfd38d4 --- /dev/null +++ b/environments/gcr-argo/external-secrets.example.yaml @@ -0,0 +1,50 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in environments//countly.yaml: +# 
+# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: my-secret-store +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "countly/encryption-reports-key" +# webSessionSecret: "countly/web-session-secret" +# passwordSecret: "countly/password-secret" +# clickhouse: +# url: "countly/clickhouse-url" +# username: "countly/clickhouse-username" +# password: "countly/clickhouse-password" +# database: "countly/clickhouse-database" +# kafka: +# brokers: "countly/kafka-brokers" +# securityProtocol: "countly/kafka-security-protocol" +# mongodb: +# connectionString: "countly/mongodb-connection-string" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "customers/acme/gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. 
diff --git a/environments/gcr-argo/global.yaml b/environments/gcr-argo/global.yaml new file mode 100644 index 0000000..333ac86 --- /dev/null +++ b/environments/gcr-argo/global.yaml @@ -0,0 +1,39 @@ +# =============================================================================
+# Countly Deployment — Global Configuration
+# =============================================================================
+
+global:
+ sizing: tier1 # NOTE(review): not in the documented set (local | small | production) — confirm this profile is defined in helmfile before deploying
+ observability: disabled
+ kafkaConnect: balanced
+ tls: letsencrypt
+ security: open
+
+ imageRegistry: ""
+ imageSource:
+ mode: gcpArtifactRegistry
+ gcpArtifactRegistry:
+ repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified
+ imagePullSecretExternalSecret:
+ enabled: true
+ refreshInterval: "1h"
+ secretStoreRef:
+ name: gcp-secrets
+ kind: ClusterSecretStore
+ remoteRef:
+ key: customers-gcr-argo-gar-dockerconfig
+ storageClass: ""
+ imagePullSecrets:
+ - name: countly-registry
+
+ingress:
+ hostname: gcr-argo.count.ly
+ className: nginx
+
+backingServices:
+ mongodb:
+ mode: bundled
+ clickhouse:
+ mode: bundled
+ kafka:
+ mode: bundled diff --git a/environments/gcr-argo/image-pull-secrets.example.yaml b/environments/gcr-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/gcr-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# =============================================================================
+# Image Pull Secrets Example
+# =============================================================================
+# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED.
+#
+# Use this when Countly and Kafka Connect pull from a private registry
+# such as GCP Artifact Registry (GAR). 
+# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml new file mode 100644 index 0000000..472336f --- /dev/null +++ b/environments/gcr-argo/kafka.yaml @@ -0,0 +1,341 @@ +# ============================================================================= +# Kafka Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-kafka/values.yaml is listed here. +# Override only what differs from the profile defaults in your environment. 
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imageSource: + mode: direct + gcpArtifactRegistry: + repositoryPrefix: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# --- Strimzi Operator API Version --- +strimzi: + apiVersion: kafka.strimzi.io/v1 + +# --- Kafka Version --- +version: "4.2.0" + +# ============================================================================= +# Brokers +# ============================================================================= +brokers: + replicas: 3 + resources: + requests: + cpu: "1" + memory: "4Gi" + limits: + cpu: "1" + memory: "4Gi" + jvmOptions: + xms: "2g" + xmx: "2g" + + # --- Persistence --- + persistence: + volumes: + - id: 0 + size: 100Gi + storageClass: "" + deleteClaim: false # Delete PVC when broker is removed + + # --- Broker Config --- + config: + default.replication.factor: 2 + min.insync.replicas: 2 + log.retention.hours: 168 # 7 days + log.segment.bytes: "1073741824" # 1 GiB + compression.type: lz4 + auto.create.topics.enable: false + offsets.topic.replication.factor: 2 + num.partitions: 24 + transaction.state.log.replication.factor: 2 + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Controllers (KRaft) +# ============================================================================= +controllers: + replicas: 3 + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + + persistence: + size: 20Gi + storageClass: "" + + # --- Scheduling --- + 
scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + +# ============================================================================= +# Listeners +# ============================================================================= +listeners: + - name: internal + port: 9092 + type: internal + tls: false + +# ============================================================================= +# Cruise Control +# ============================================================================= +cruiseControl: + enabled: true + resources: + requests: + cpu: "1" + memory: "2Gi" + limits: + cpu: "1" + memory: "2Gi" + jvmOptions: + xms: "1g" + xmx: "2g" + autoRebalance: + - mode: add-brokers + - mode: remove-brokers + +# ============================================================================= +# Kafka Connect (ClickHouse Sink) +# ============================================================================= +kafkaConnect: + enabled: true + name: connect-ch + image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + replicas: 2 + bootstrapServers: "" # Auto-derived from cluster if empty + + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + jvmOptions: + xms: "5g" + xmx: "5g" + + # --- Worker Configuration --- + workerConfig: + group.id: connect-ch + config.storage.topic: connect_ch_configs + offset.storage.topic: connect_ch_offsets + status.storage.topic: connect_ch_status + config.storage.replication.factor: 2 + offset.storage.replication.factor: 2 + status.storage.replication.factor: 2 + offset.storage.partitions: 25 + status.storage.partitions: 5 + key.converter: org.apache.kafka.connect.storage.StringConverter + value.converter: org.apache.kafka.connect.json.JsonConverter + 
value.converter.schemas.enable: "false" + connector.client.config.override.policy: All + config.providers: env + config.providers.env.class: org.apache.kafka.common.config.provider.EnvVarConfigProvider + + # --- ClickHouse Connection (for the sink connector) --- + clickhouse: + existingSecret: "" + secretName: clickhouse-auth + host: "" # Auto-derived from clickhouseNamespace if empty + port: "8123" + ssl: "false" + database: "countly_drill" + username: "default" + password: "" # REQUIRED: must match ClickHouse default user password + + # --- Environment Variables (injected into Connect pods) --- + env: + EXACTLY_ONCE: "false" + ERRORS_RETRY_TIMEOUT: "300" + ERRORS_TOLERANCE: "none" # none | all + CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" + BYPASS_ROW_BINARY: "false" + TABLE_REFRESH_INTERVAL: "300" # seconds + KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter + VALUE_CONVERTER_SCHEMAS_ENABLE: "false" + KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 32 MiB + KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" + KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" + KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB + KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB + KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" + KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" + KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" + KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" + + # --- HPA --- + hpa: + enabled: false + minReplicas: 1 + maxReplicas: 3 + metrics: + cpu: + averageUtilization: 70 + memory: + averageUtilization: 80 + behavior: + scaleUp: + 
stabilizationWindowSeconds: 120 + policies: + - type: Percent + value: 50 + periodSeconds: 120 + - type: Pods + value: 2 + periodSeconds: 120 + selectPolicy: Min + scaleDown: + stabilizationWindowSeconds: 600 + policies: + - type: Percent + value: 25 + periodSeconds: 300 + - type: Pods + value: 1 + periodSeconds: 300 + selectPolicy: Min + + # --- OpenTelemetry Java Agent --- + # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. + # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer + # and outbound HTTP (ClickHouse sink) span creation. + otel: + enabled: false + serviceName: "kafka-connect" + exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" + exporterProtocol: "grpc" + sampler: "parentbased_traceidratio" + samplerArg: "1.0" + resourceAttributes: "" # e.g. "deployment.environment=production,k8s.cluster.name=my-cluster" + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- Connectors --- + connectors: + - name: ch-sink-drill-events + enabled: true + state: running # running | paused | stopped + class: com.clickhouse.kafka.connect.ClickHouseSinkConnector + tasksMax: 1 + autoRestart: + enabled: true + maxRestarts: 10 + config: + topics: drill-events + topic2TableMap: "drill-events=drill_events" + hostname: "${env:CLICKHOUSE_HOST}" + port: "${env:CLICKHOUSE_PORT}" + ssl: "${env:CLICKHOUSE_SSL}" + database: "${env:CLICKHOUSE_DB}" + username: "${env:CLICKHOUSE_USER}" + password: "${env:CLICKHOUSE_PASSWORD}" + exactlyOnce: "${env:EXACTLY_ONCE}" + errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" + errors.tolerance: "${env:ERRORS_TOLERANCE}" + clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" + bypassRowBinary: "${env:BYPASS_ROW_BINARY}" + tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" + key.converter: "${env:KEY_CONVERTER}" + 
value.converter: "${env:VALUE_CONVERTER}" + value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" + consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" + consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" + consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" + consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" + consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" + consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" + consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" + consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" + consumer.override.heartbeat.interval.ms: "${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" + connection.timeout: "60" + socket.timeout: "30000" + retry.count: "3" + connection.pool.size: "10" + healthcheck.enabled: "true" + healthcheck.interval: "10000" + dlq: {} # Dead-letter queue config (empty = disabled) + +# ============================================================================= +# Metrics +# ============================================================================= +metrics: + enabled: true + +# --- Cross-Namespace Reference --- +clickhouseNamespace: clickhouse + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 9092 + # protocol: TCP + +# ============================================================================= +# Secrets 
+# =============================================================================
+secrets:
+  keep: true # Retain secrets on helm uninstall
diff --git a/environments/gcr-argo/migration.yaml b/environments/gcr-argo/migration.yaml
new file mode 100644
index 0000000..6fa760c
--- /dev/null
+++ b/environments/gcr-argo/migration.yaml
@@ -0,0 +1,3 @@
+# Migration overrides for optional countly-migration app.
+# Enable per customer by setting `migration: enabled` in argocd/customers/<customer>.yaml
+# and then filling this file with environment-specific overrides as needed.
diff --git a/environments/gcr-argo/mongodb.yaml b/environments/gcr-argo/mongodb.yaml
new file mode 100644
index 0000000..31f230e
--- /dev/null
+++ b/environments/gcr-argo/mongodb.yaml
@@ -0,0 +1,144 @@
+# =============================================================================
+# MongoDB Chart — Comprehensive Reference Configuration
+# =============================================================================
+# Every configurable key from charts/countly-mongodb/values.yaml is listed here.
+# Override only what differs from the profile defaults in your environment.
+# ============================================================================= + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + sizing: small # local | small | production + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +createNamespace: false + +# ============================================================================= +# MongoDB Server +# ============================================================================= +mongodb: + version: "8.2.5" + members: 2 # Replica set member count + + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "2" + memory: "8Gi" + + persistence: + storageClass: "" # Overrides global.storageClass for MongoDB PVCs + size: 100Gi + + # --- Scheduling --- + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + antiAffinity: + enabled: true + type: preferred # preferred | required + topologyKey: kubernetes.io/hostname + weight: 100 + + # --- TLS --- + tls: + enabled: false + +# ============================================================================= +# Users +# ============================================================================= +users: + # --- Application User --- + app: + name: app + database: admin + roles: + - name: readWriteAnyDatabase + db: admin + - name: dbAdmin + db: admin + passwordSecretName: app-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + + # --- Metrics Exporter User --- + metrics: + enabled: true + name: metrics + database: admin + roles: + - name: clusterMonitor + db: admin + - name: read + db: local + passwordSecretName: metrics-user-password + passwordSecretKey: password + password: "" # REQUIRED on first install + +# ============================================================================= +# Prometheus Exporter +# ============================================================================= 
+exporter: + enabled: true + image: percona/mongodb_exporter:0.40.0 + port: 9216 + resources: + requests: + cpu: "50m" + memory: "64Mi" + limits: + cpu: "200m" + memory: "256Mi" + service: + enabled: true + args: + - --collect-all + - --collector.diagnosticdata + - --collector.replicasetstatus + - --collector.dbstats + - --collector.topmetrics + - --collector.indexstats + - --collector.collstats + +# ============================================================================= +# Pod Disruption Budget +# ============================================================================= +podDisruptionBudget: + enabled: false + maxUnavailable: 1 + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: + enabled: false + allowedNamespaces: + - countly + allowMonitoring: false + monitoringNamespaceSelector: + kubernetes.io/metadata.name: monitoring + additionalIngress: [] + # additionalIngress: + # - from: + # - namespaceSelector: + # matchLabels: + # kubernetes.io/metadata.name: my-app-namespace + # ports: + # - port: 27017 + # protocol: TCP + +# ============================================================================= +# Secrets +# ============================================================================= +secrets: + keep: true # Retain secrets on helm uninstall diff --git a/environments/gcr-argo/observability.yaml b/environments/gcr-argo/observability.yaml new file mode 100644 index 0000000..79a4b39 --- /dev/null +++ b/environments/gcr-argo/observability.yaml @@ -0,0 +1,437 @@ +# ============================================================================= +# Observability Chart — Comprehensive Reference Configuration +# ============================================================================= +# Every configurable key from charts/countly-observability/values.yaml is listed here. 
+# Override only what differs from the profile defaults in your environment. +# ============================================================================= + +# --- Deployment Mode --- +# full — All backends + Grafana in-cluster +# hybrid — All backends in-cluster, no Grafana (use external Grafana) +# external — Collectors only, forward to external endpoints +# disabled — Set in global.yaml observability.mode to skip this chart entirely +mode: full + +# --- Cluster Name (injected into Prometheus external_labels) --- +clusterName: countly-local + +# --- Cross-Namespace References --- +countlyNamespace: countly +clickhouseNamespace: clickhouse +mongodbNamespace: mongodb +kafkaNamespace: kafka +ingressNamespace: ingress-nginx +certManagerNamespace: cert-manager +clickhouseOperatorNamespace: clickhouse-operator-system + +# --- NGINX Ingress Controller Scrape Configuration --- +nginxIngress: + podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" + metricsPort: "9113" + +# --- Global Settings (inherited from helmfile globals) --- +global: + imageRegistry: "" + imagePullSecrets: [] + storageClass: "" + scheduling: + nodeSelector: {} + tolerations: [] + +nameOverride: "" +fullnameOverride: "" + +# ============================================================================= +# Per-Signal Configuration +# ============================================================================= + +# --- Metrics --- +metrics: + enabled: true + sampling: + interval: "15s" # Global Prometheus scrape_interval + +# --- Traces --- +traces: + enabled: true + sampling: + strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased + ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased + tailSampling: # Only used when strategy == TailBased + waitDuration: "10s" + numTraces: 50000 + policies: + keepErrors: true + latencyThresholdMs: 2000 + baselineRatio: 0.1 + +# --- Logs --- +logs: + enabled: true + sampling: + enabled: false + dropRate: 
0 # 0.0-1.0, fraction of logs to drop + +# --- Profiling --- +profiling: + enabled: true + sampling: + rate: "100" # Advisory — used in NOTES.txt for SDK config + +# ============================================================================= +# Prometheus +# ============================================================================= +prometheus: + image: + repository: prom/prometheus + tag: "v3.10.0" + retention: + time: "30d" + size: "50GB" + storage: + size: 100Gi + storageClass: "" + resources: + requests: + cpu: "2" + memory: "3Gi" + limits: + cpu: "2" + memory: "4Gi" + extraArgs: [] + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. https://prom.corp.com/api/v1/write) + +# ============================================================================= +# Loki +# ============================================================================= +loki: + image: + repository: grafana/loki + tag: "3.6.7" + retention: "30d" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure + size: 100Gi + storageClass: "" + # Object storage settings (only used when backend != filesystem) + bucket: "" # Bucket/container name (REQUIRED for object backends) + endpoint: "" # Custom endpoint (e.g. 
MinIO: http://minio:9000) + region: "" # Cloud region (S3) + insecure: false # Use HTTP instead of HTTPS + forcePathStyle: false # S3 path-style access (required for MinIO) + # Credential file secret (for GCS JSON key files) + existingSecret: "" # K8s Secret name to mount + secretKey: "key.json" # Key within the Secret + secretMountPath: "/var/secrets/storage" + # Env-based credentials (for AWS access keys, Azure account keys) + envFromSecret: "" # K8s Secret name to inject as env vars + # Provider-specific passthrough (rendered directly into provider block) + config: {} + # + # --- Object storage examples (apply to loki, tempo, and pyroscope) --- + # + # --- AWS S3 example --- + # backend: s3 + # s3: + # bucket: my-loki-data + # region: us-east-1 + # endpoint: "" + # insecure: false + # forcePathStyle: false + # credentials: + # source: envFromSecret + # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY + # + # --- GCS example --- + # backend: gcs + # gcs: + # bucket: my-loki-data + # credentials: + # source: existingSecret + # existingSecret: loki-gcs-key + # secretKey: "key.json" + # + # --- MinIO example --- + # backend: s3 + # s3: + # bucket: loki + # endpoint: minio.storage.svc.cluster.local:9000 + # insecure: true + # forcePathStyle: true + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + config: + maxStreamsPerUser: 30000 + maxLineSize: 256000 + ingestionRateMb: 64 + ingestionBurstSizeMb: 128 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + pushUrl: "" # Full Loki push URL (e.g. 
https://loki.corp.com/loki/api/v1/push) + +# ============================================================================= +# Tempo +# ============================================================================= +tempo: + image: + repository: grafana/tempo + tag: "2.10.1" + retention: "12h" + storage: + backend: "local" # local | s3 | gcs | azure + size: 150Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "3" + memory: "6Gi" + limits: + cpu: "4" + memory: "10Gi" + config: + ingestionRateLimitBytes: 100000000 + ingestionBurstSizeBytes: 150000000 + maxTracesPerUser: 50000 + maxBytesPerTrace: 5000000 + maxRecvMsgSizeMiB: 16 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. tempo.corp.com:4317) + otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) + +# ============================================================================= +# Pyroscope +# ============================================================================= +pyroscope: + image: + repository: grafana/pyroscope + tag: "1.18.1" + retention: "72h" + storage: + backend: "filesystem" # filesystem | s3 | gcs | azure | swift + size: 20Gi + storageClass: "" + bucket: "" + endpoint: "" + region: "" + insecure: false + forcePathStyle: false + existingSecret: "" + secretKey: "key.json" + secretMountPath: "/var/secrets/storage" + envFromSecret: "" + config: {} + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + ingestUrl: "" # Pyroscope ingest URL (e.g. 
https://pyroscope.corp.com) + +# ============================================================================= +# Grafana +# ============================================================================= +grafana: + enabled: true # Only deployed when mode == "full" + image: + repository: grafana/grafana + tag: "12.4.0" + admin: + existingSecret: "" # Use an existing Secret for admin credentials + userKey: "admin-user" + passwordKey: "admin-password" + persistence: + enabled: false # Ephemeral by default (declarative config, no state to lose) + size: 10Gi + storageClass: "" + resources: + requests: + cpu: "1" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + plugins: + install: "grafana-pyroscope-datasource" + featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" + dashboards: + enabled: true + overview: true + platform: true + countly: true + data: true + edge: true + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + affinity: {} + external: + url: "" # External Grafana URL (for NOTES.txt only) + +# ============================================================================= +# Alloy (DaemonSet — log collection) +# ============================================================================= +alloy: + image: + repository: grafana/alloy + tag: "v1.13.2" + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + scheduling: + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + +# ============================================================================= +# Alloy-OTLP (Deployment — OTLP traces + profiling receive) +# ============================================================================= +alloyOtlp: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2" + memory: "2Gi" + memoryLimiter: + limit: "1600MiB" # Must be < resources.limits.memory + spikeLimit: 
"400MiB" + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# Alloy-Metrics (Deployment — ALL Prometheus scraping) +# ============================================================================= +alloyMetrics: + image: + repository: grafana/alloy + tag: "v1.13.2" + replicas: 1 + resources: + requests: + cpu: "500m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "512Mi" + pdb: + enabled: false + minAvailable: 1 + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# kube-state-metrics +# ============================================================================= +kubeStateMetrics: + enabled: true + image: + repository: registry.k8s.io/kube-state-metrics/kube-state-metrics + tag: "v2.18.0" + resources: + requests: + cpu: "10m" + memory: "32Mi" + limits: + cpu: "100m" + memory: "256Mi" + namespaces: + - countly + - observability + - ingress-nginx + - kube-system + - clickhouse + - mongodb + - kafka + scheduling: + nodeSelector: {} + tolerations: [] + +# ============================================================================= +# node-exporter +# ============================================================================= +nodeExporter: + enabled: true + image: + repository: prom/node-exporter + tag: "v1.10.2" + resources: + requests: + cpu: "100m" + memory: "180Mi" + limits: + cpu: "250m" + memory: "300Mi" + +# ============================================================================= +# Ingress (for Grafana) +# ============================================================================= +ingress: + enabled: false + className: nginx + annotations: {} + hosts: + - host: obs.example.com + tls: [] + +# ============================================================================= +# Network Policy +# ============================================================================= +networkPolicy: 
+  enabled: false
+  additionalIngress: []
+  # additionalIngress:
+  #   - from:
+  #       - namespaceSelector:
+  #           matchLabels:
+  #             kubernetes.io/metadata.name: my-app-namespace
+  #     ports:
+  #       - port: 4318
+  #         protocol: TCP
diff --git a/environments/gcr-argo/secrets-clickhouse.yaml b/environments/gcr-argo/secrets-clickhouse.yaml
new file mode 100644
index 0000000..4bc6a1c
--- /dev/null
+++ b/environments/gcr-argo/secrets-clickhouse.yaml
@@ -0,0 +1,4 @@
+# ClickHouse secrets — FILL IN before first deploy (never commit real values unencrypted; use SOPS or External Secrets)
+auth:
+  defaultUserPassword:
+    password: "GcrArgoClickhouse2026!"
diff --git a/environments/gcr-argo/secrets-countly.yaml b/environments/gcr-argo/secrets-countly.yaml
new file mode 100644
index 0000000..c80690a
--- /dev/null
+++ b/environments/gcr-argo/secrets-countly.yaml
@@ -0,0 +1,16 @@
+# Countly secrets — FILL IN before first deploy (never commit real values unencrypted; use SOPS or External Secrets)
+# Passwords must match across charts (see secrets.example.yaml)
+secrets:
+  mode: values
+  common:
+    encryptionReportsKey: "gcr-argo-encryption-key-2026"
+    webSessionSecret: "gcr-argo-web-session-2026"
+    passwordSecret: "gcr-argo-password-secret-2026"
+  clickhouse:
+    username: "default"
+    password: "GcrArgoClickhouse2026!"
+    database: "countly_drill"
+  kafka:
+    securityProtocol: "PLAINTEXT"
+  mongodb:
+    password: "GcrArgoMongo2026!"
diff --git a/environments/gcr-argo/secrets-kafka.yaml b/environments/gcr-argo/secrets-kafka.yaml
new file mode 100644
index 0000000..d84c02c
--- /dev/null
+++ b/environments/gcr-argo/secrets-kafka.yaml
@@ -0,0 +1,4 @@
+# Kafka secrets — FILL IN before first deploy (never commit real values unencrypted; use SOPS or External Secrets)
+kafkaConnect:
+  clickhouse:
+    password: "GcrArgoClickhouse2026!"
diff --git a/environments/gcr-argo/secrets-migration.yaml b/environments/gcr-argo/secrets-migration.yaml
new file mode 100644
index 0000000..6fe5890
--- /dev/null
+++ b/environments/gcr-argo/secrets-migration.yaml
@@ -0,0 +1,2 @@
+# Migration secrets placeholder.
+# Fill when `migration: enabled` is used for a customer.
diff --git a/environments/gcr-argo/secrets-mongodb.yaml b/environments/gcr-argo/secrets-mongodb.yaml
new file mode 100644
index 0000000..7e53925
--- /dev/null
+++ b/environments/gcr-argo/secrets-mongodb.yaml
@@ -0,0 +1,7 @@
+# MongoDB secrets — FILL IN before first deploy (never commit real values unencrypted; use SOPS or External Secrets)
+users:
+  app:
+    password: "GcrArgoMongo2026!"
+  metrics:
+    enabled: true
+    password: "GcrArgoMongoMetrics2026!"
diff --git a/environments/gcr-argo/secrets-observability.yaml b/environments/gcr-argo/secrets-observability.yaml
new file mode 100644
index 0000000..ad07ad2
--- /dev/null
+++ b/environments/gcr-argo/secrets-observability.yaml
@@ -0,0 +1,2 @@
+# Observability secrets — typically none required for bundled mode
+# Add external backend credentials here if using observability: external
diff --git a/environments/gcr-argo/secrets.example.yaml b/environments/gcr-argo/secrets.example.yaml
new file mode 100644
index 0000000..282eb0d
--- /dev/null
+++ b/environments/gcr-argo/secrets.example.yaml
@@ -0,0 +1,42 @@
+# =============================================================================
+# Countly Deployment — Complete Secrets Reference
+# =============================================================================
+# DO NOT COMMIT THIS FILE WITH REAL VALUES.
+#
+# This file documents ALL secrets required for a first-time install.
+# Copy to your environment directory and fill in real values, then
+# split into per-chart secret files (see below).
+# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//secrets-countly.yaml) --- +secrets: + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +users: + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" diff --git a/environments/gcr-argo/secrets.sops.example.yaml b/environments/gcr-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..9b652d1 --- /dev/null +++ b/environments/gcr-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//secrets-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//secrets-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure 
as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). +# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/environments/local/kafka.yaml b/environments/local/kafka.yaml index 5eb3b42..6315304 100644 --- a/environments/local/kafka.yaml +++ b/environments/local/kafka.yaml @@ -5,6 +5,7 @@ # Use OTel-enabled image (includes /opt/otel/opentelemetry-javaagent.jar) kafkaConnect: image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-otel-strimzi-amd64" + artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-otel-strimzi-amd64" otel: enabled: true resourceAttributes: "service.namespace=countly,deployment.environment=local" diff --git a/environments/reference/README.md b/environments/reference/README.md index 12b374b..a2a0335 100644 --- a/environments/reference/README.md +++ b/environments/reference/README.md @@ -10,19 +10,21 @@ This directory is a complete starting point for a new Countly deployment. ``` 2. 
Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` 3. Fill in required secrets in the chart-specific files: - - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `clickhouse.yaml` → `auth.defaultUserPassword.password` - - `kafka.yaml` → `kafkaConnect.clickhouse.password` + - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `clickhouse.yaml` → `auth.defaultUserPassword.password` + - `kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` Or use `secrets.example.yaml` as a complete reference. 
@@ -49,6 +51,10 @@ For production, choose one of: - **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) - **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + ## Files | File | Purpose | @@ -67,3 +73,5 @@ For production, choose one of: | `secrets.example.yaml` | Combined secrets reference (all charts in one file) | | `secrets.sops.example.yaml` | SOPS encryption guide | | `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/reference/cluster-secret-store.gcp.example.yaml b/environments/reference/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..e0493b8 --- /dev/null +++ b/environments/reference/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. 
+# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. +# ============================================================================= + +apiVersion: external-secrets.io/v1beta1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/reference/countly.yaml b/environments/reference/countly.yaml index ccf54bb..f01bf74 100644 --- a/environments/reference/countly.yaml +++ b/environments/reference/countly.yaml @@ -29,7 +29,8 @@ serviceAccount: # --- Image --- image: repository: gcr.io/countly-dev-313620/countly-unified - digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" + artifactRepository: countly-unified + digest: "sha256:b42efb9713ee11d173fe409924fb9e2a208b5c0beafed9e42f349b996b6650a4" tag: "26.01" # Fallback when digest is empty pullPolicy: IfNotPresent diff --git a/environments/reference/external-secrets.example.yaml b/environments/reference/external-secrets.example.yaml index 7bf93ef..dfd38d4 100644 --- a/environments/reference/external-secrets.example.yaml +++ b/environments/reference/external-secrets.example.yaml @@ -27,6 +27,20 @@ # mongodb: # connectionString: "countly/mongodb-connection-string" # +# For GAR image pulls, configure this in environments/<customer>/global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "customers/acme/gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. 
Create a SecretStore or ClusterSecretStore pointing to your secrets backend diff --git a/environments/reference/global.yaml b/environments/reference/global.yaml index 541701c..a487420 100644 --- a/environments/reference/global.yaml +++ b/environments/reference/global.yaml @@ -18,6 +18,18 @@ global: # --- Registry & Storage --- imageRegistry: "" # Private registry prefix (leave empty for public images) + imageSource: + mode: direct # direct | gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: "" # e.g. us-central1-docker.pkg.dev/my-project/my-repo + imagePullSecretExternalSecret: + enabled: false + refreshInterval: "1h" + secretStoreRef: + name: "" # e.g. gcp-secrets + kind: ClusterSecretStore + remoteRef: + key: "" # Secret Manager key containing the Docker config JSON storageClass: "" # Default storage class for all PVCs (leave empty for cluster default) imagePullSecrets: [] # Docker config secrets for private registries diff --git a/environments/reference/image-pull-secrets.example.yaml b/environments/reference/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/reference/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). +# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. 
+# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/reference/kafka.yaml b/environments/reference/kafka.yaml index 544557d..472336f 100644 --- a/environments/reference/kafka.yaml +++ b/environments/reference/kafka.yaml @@ -8,6 +8,10 @@ # --- Global Settings (inherited from helmfile globals) --- global: imageRegistry: "" + imageSource: + mode: direct + gcpArtifactRegistry: + repositoryPrefix: "" imagePullSecrets: [] storageClass: "" sizing: small # local | small | production @@ -135,6 +139,7 @@ kafkaConnect: enabled: true name: connect-ch image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" replicas: 2 bootstrapServers: "" # Auto-derived from cluster if empty diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index cf7854f..bb43d57 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -23,6 +23,7 @@ Defaults: observability full kafkaConnect balanced migration disabled + gcpSA set after scaffold for External Secrets Workload Identity EOF } @@ -65,6 +66,18 @@ global: security: open imageRegistry: "" + imageSource: + mode: direct + gcpArtifactRegistry: + repositoryPrefix: "" + imagePullSecretExternalSecret: + enabled: false + 
refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRef: + key: "" storageClass: "" imagePullSecrets: [] @@ -86,6 +99,11 @@ customer: ${customer} environment: ${customer} project: ${project} server: ${server} +gcpServiceAccountEmail: change-me@your-project.iam.gserviceaccount.com +secretManagerProjectID: change-me-secret-manager-project +clusterProjectID: change-me-cluster-project +clusterName: change-me-cluster-name +clusterLocation: change-me-cluster-location hostname: ${hostname} sizing: production security: open @@ -102,6 +120,7 @@ Created: Next: 1. Fill in environments/${customer}/secrets-*.yaml - 2. Review environments/${customer}/*.yaml for customer-specific overrides - 3. Commit and sync countly-bootstrap + 2. Set argocd/customers/${customer}.yaml GCP and cluster metadata for External Secrets + 3. Review environments/${customer}/*.yaml for customer-specific overrides + 4. Commit and sync countly-bootstrap EOF From c6faf3bc15c0bdc7aa78243173c06be0171328a0 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 21:09:11 +0530 Subject: [PATCH 32/79] Fix gcr-argo GAR overrides for countly and kafka --- environments/gcr-argo/countly.yaml | 19 ++++++++++++++++--- environments/gcr-argo/kafka.yaml | 17 +++++++++++++---- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/environments/gcr-argo/countly.yaml b/environments/gcr-argo/countly.yaml index 2df3d8e..1ba3cf6 100644 --- a/environments/gcr-argo/countly.yaml +++ b/environments/gcr-argo/countly.yaml @@ -8,9 +8,22 @@ # --- Global Settings (inherited from helmfile globals) --- global: imageRegistry: "" - imagePullSecrets: [] + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRef: + key: customers-gcr-argo-gar-dockerconfig + imagePullSecrets: 
+ - name: countly-registry storageClass: "" - sizing: small # local | small | production + sizing: tier1 # local | small | production | tier1 scheduling: nodeSelector: {} tolerations: [] @@ -30,7 +43,7 @@ serviceAccount: image: repository: gcr.io/countly-dev-313620/countly-unified artifactRepository: countly-unified - digest: "sha256:f81b39d4488c596f76a5c385d088a8998b7c1b20933366ad994f5315597ec48b" + digest: "sha256:b42efb9713ee11d173fe409924fb9e2a208b5c0beafed9e42f349b996b6650a4" tag: "26.01" # Fallback when digest is empty pullPolicy: IfNotPresent diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml index 472336f..9ee37a5 100644 --- a/environments/gcr-argo/kafka.yaml +++ b/environments/gcr-argo/kafka.yaml @@ -9,12 +9,21 @@ global: imageRegistry: "" imageSource: - mode: direct + mode: gcpArtifactRegistry gcpArtifactRegistry: - repositoryPrefix: "" - imagePullSecrets: [] + repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRef: + key: customers-gcr-argo-gar-dockerconfig + imagePullSecrets: + - name: countly-registry storageClass: "" - sizing: small # local | small | production + sizing: tier1 # local | small | production | tier1 scheduling: nodeSelector: {} tolerations: [] From 4bcd547df95be43426adf46c6fce21d719fec7b3 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 21:16:32 +0530 Subject: [PATCH 33/79] Add external-secrets operator appset --- .../07-external-secrets-operator.yaml | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 argocd/operators/07-external-secrets-operator.yaml diff --git a/argocd/operators/07-external-secrets-operator.yaml b/argocd/operators/07-external-secrets-operator.yaml new file mode 100644 index 0000000..de20652 --- /dev/null +++ b/argocd/operators/07-external-secrets-operator.yaml @@ -0,0 +1,44 @@ +apiVersion: argoproj.io/v1alpha1 
+kind: ApplicationSet +metadata: + name: customer-external-secrets + namespace: argocd +spec: + goTemplate: true + goTemplateOptions: + - missingkey=error + generators: + - git: + repoURL: https://github.com/Countly/helm.git + revision: gcp-artifact-rep-integration + files: + - path: argocd/customers/*.yaml + template: + metadata: + name: "{{ .customer }}-external-secrets" + annotations: + argocd.argoproj.io/sync-wave: "-23" + spec: + project: default + source: + repoURL: https://charts.external-secrets.io + chart: external-secrets + targetRevision: 1.3.1 + helm: + releaseName: external-secrets + values: | + installCRDs: true + serviceAccount: + create: true + annotations: + iam.gke.io/gcp-service-account: "{{ .gcpServiceAccountEmail }}" + destination: + server: "{{ .server }}" + namespace: external-secrets + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true + - ServerSideApply=true From deb76e6a18630e7576a6f81ac5e5e803c07729e5 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 21:25:03 +0530 Subject: [PATCH 34/79] Use External Secrets v1 APIs --- .../templates/clustersecretstore.yaml | 2 +- charts/countly-kafka/templates/external-secret-image-pull.yaml | 2 +- charts/countly/templates/external-secret-image-pull.yaml | 2 +- environments/gcr-argo/cluster-secret-store.gcp.example.yaml | 2 +- environments/reference/cluster-secret-store.gcp.example.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/charts/countly-cluster-secret-store/templates/clustersecretstore.yaml b/charts/countly-cluster-secret-store/templates/clustersecretstore.yaml index 36f75dd..991843c 100644 --- a/charts/countly-cluster-secret-store/templates/clustersecretstore.yaml +++ b/charts/countly-cluster-secret-store/templates/clustersecretstore.yaml @@ -1,4 +1,4 @@ -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ClusterSecretStore metadata: name: {{ .Values.secretStore.name }} diff --git 
a/charts/countly-kafka/templates/external-secret-image-pull.yaml b/charts/countly-kafka/templates/external-secret-image-pull.yaml index 4cdcebf..7354412 100644 --- a/charts/countly-kafka/templates/external-secret-image-pull.yaml +++ b/charts/countly-kafka/templates/external-secret-image-pull.yaml @@ -1,5 +1,5 @@ {{- if .Values.global.imagePullSecretExternalSecret.enabled }} -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly-kafka.imagePullSecretName" .) }} diff --git a/charts/countly/templates/external-secret-image-pull.yaml b/charts/countly/templates/external-secret-image-pull.yaml index 8f64b39..032f9c1 100644 --- a/charts/countly/templates/external-secret-image-pull.yaml +++ b/charts/countly/templates/external-secret-image-pull.yaml @@ -1,5 +1,5 @@ {{- if .Values.global.imagePullSecretExternalSecret.enabled }} -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly.imagePullSecretName" .) }} diff --git a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml index e0493b8..7bb563f 100644 --- a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml +++ b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml @@ -13,7 +13,7 @@ # This file is a reference only. Adapt project IDs and names to your cluster. 
# ============================================================================= -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ClusterSecretStore metadata: name: gcp-secrets diff --git a/environments/reference/cluster-secret-store.gcp.example.yaml b/environments/reference/cluster-secret-store.gcp.example.yaml index e0493b8..7bb563f 100644 --- a/environments/reference/cluster-secret-store.gcp.example.yaml +++ b/environments/reference/cluster-secret-store.gcp.example.yaml @@ -13,7 +13,7 @@ # This file is a reference only. Adapt project IDs and names to your cluster. # ============================================================================= -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ClusterSecretStore metadata: name: gcp-secrets From 33f359e17e3a5a4bf21eef0347d4c2c2faf57c28 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 21:44:31 +0530 Subject: [PATCH 35/79] Use published Kafka Connect image tag --- charts/countly-kafka/values.yaml | 4 ++-- environments/gcr-argo/kafka.yaml | 4 ++-- environments/reference/kafka.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/countly-kafka/values.yaml b/charts/countly-kafka/values.yaml index b7d6eb0..3fe31f9 100644 --- a/charts/countly-kafka/values.yaml +++ b/charts/countly-kafka/values.yaml @@ -115,8 +115,8 @@ cruiseControl: kafkaConnect: enabled: true name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" - artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" + artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" replicas: 2 bootstrapServers: "" resources: diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml index 9ee37a5..de1d4d8 100644 --- a/environments/gcr-argo/kafka.yaml +++ 
b/environments/gcr-argo/kafka.yaml @@ -147,8 +147,8 @@ cruiseControl: kafkaConnect: enabled: true name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" - artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" + artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" replicas: 2 bootstrapServers: "" # Auto-derived from cluster if empty diff --git a/environments/reference/kafka.yaml b/environments/reference/kafka.yaml index 472336f..13d11a0 100644 --- a/environments/reference/kafka.yaml +++ b/environments/reference/kafka.yaml @@ -138,8 +138,8 @@ cruiseControl: kafkaConnect: enabled: true name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" - artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi-amd64" + image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" + artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" replicas: 2 bootstrapServers: "" # Auto-derived from cluster if empty From 01190fbb6251b87bc9a98e7c7f882c3041456938 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Tue, 31 Mar 2026 22:12:22 +0530 Subject: [PATCH 36/79] Keep customer env files as profile overrides --- environments/gcr-argo/clickhouse.yaml | 205 +------- environments/gcr-argo/countly.yaml | 567 +---------------------- environments/gcr-argo/kafka.yaml | 332 +------------ environments/gcr-argo/migration.yaml | 5 +- environments/gcr-argo/mongodb.yaml | 146 +----- environments/gcr-argo/observability.yaml | 439 +----------------- scripts/new-argocd-customer.sh | 28 ++ 7 files changed, 40 insertions(+), 1682 deletions(-) diff --git a/environments/gcr-argo/clickhouse.yaml b/environments/gcr-argo/clickhouse.yaml index d7899d3..17291a9 100644 --- a/environments/gcr-argo/clickhouse.yaml +++ 
b/environments/gcr-argo/clickhouse.yaml @@ -1,203 +1,2 @@ -# ============================================================================= -# ClickHouse Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-clickhouse/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Operator API Version --- -clickhouseOperator: - apiVersion: clickhouse.com/v1alpha1 - -# ============================================================================= -# Cluster Topology -# ============================================================================= -version: "26.2" -shards: 1 -replicas: 2 - -# ============================================================================= -# Images -# ============================================================================= -image: - server: clickhouse/clickhouse-server - keeper: clickhouse/clickhouse-keeper - -# ============================================================================= -# Database -# ============================================================================= -database: countly_drill - -# ============================================================================= -# Authentication -# ============================================================================= -auth: - # --- Default User Password --- - defaultUserPassword: - existingSecret: "" # Use an existing secret instead of creating one - secretName: clickhouse-default-password - key: password - password: "" # 
REQUIRED: ClickHouse default user password - - # --- Admin User (optional, separate from default) --- - adminUser: - enabled: false - # Precomputed SHA256 hex of the admin password (64 hex chars). - # Generate: echo -n 'your_password' | sha256sum | cut -d' ' -f1 - passwordSha256Hex: "" - -# ============================================================================= -# OpenTelemetry Server-Side Tracing -# ============================================================================= -# When enabled, ClickHouse logs spans to system.opentelemetry_span_log for -# queries arriving with W3C traceparent headers. -opentelemetry: - enabled: false - spanLog: - ttlDays: 7 - flushIntervalMs: 1000 - -# ============================================================================= -# Server -# ============================================================================= -server: - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" - size: 50Gi - - settings: - maxConnections: 4096 - extraConfig: "" # Raw XML injected into server config - extraUsersConfig: "" # Raw XML injected into users config - prometheus: - enabled: true - port: 9363 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - -# ============================================================================= -# Keeper (ClickHouse Keeper for replication coordination) -# ============================================================================= -keeper: - replicas: 1 - - securityContext: - runAsNonRoot: true - runAsUser: 101 - runAsGroup: 101 - fsGroup: 101 - - resources: - requests: - cpu: "250m" - memory: "512Mi" - limits: - cpu: "500m" - 
memory: "1Gi" - - persistence: - storageClass: "" - size: 5Gi - - settings: - prometheus: - enabled: true - port: 9090 - endpoint: /metrics - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - -# ============================================================================= -# Pod Disruption Budgets -# ============================================================================= -podDisruptionBudget: - server: - enabled: false - maxUnavailable: 1 - keeper: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - - kafka - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-analytics-namespace - # ports: - # - port: 8123 - # protocol: TCP - -# ============================================================================= -# Service Monitor (Prometheus Operator CRD) -# ============================================================================= -serviceMonitor: - enabled: false - interval: "15s" - serviceType: headless # headless = per-pod scraping, clusterIP = any-pod - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/gcr-argo/countly.yaml b/environments/gcr-argo/countly.yaml index 1ba3cf6..53222d7 100644 --- a/environments/gcr-argo/countly.yaml +++ b/environments/gcr-argo/countly.yaml @@ -1,13 +1,7 @@ -# ============================================================================= -# Countly Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. -# --- Global Settings (inherited from helmfile globals) --- global: - imageRegistry: "" imageSource: mode: gcpArtifactRegistry gcpArtifactRegistry: @@ -22,563 +16,6 @@ global: key: customers-gcr-argo-gar-dockerconfig imagePullSecrets: - name: countly-registry - storageClass: "" - sizing: tier1 # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Service Account --- -serviceAccount: - create: true - name: "" # Auto-derived from release name if empty - annotations: {} - -# --- Image --- image: - repository: gcr.io/countly-dev-313620/countly-unified - artifactRepository: countly-unified digest: "sha256:b42efb9713ee11d173fe409924fb9e2a208b5c0beafed9e42f349b996b6650a4" - tag: "26.01" # Fallback when digest is empty - pullPolicy: IfNotPresent - -# --- Cross-Namespace References --- -clickhouseNamespace: clickhouse -kafkaNamespace: kafka -mongodbNamespace: mongodb - -# ============================================================================= -# Component: API -# ============================================================================= -api: - enabled: true - replicaCount: 1 
- command: ["npm", "run", "start:api"] - port: 3001 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "1" - memory: "4Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 6 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 # 1-100, only used with type=preferred - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Frontend -# ============================================================================= -frontend: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:frontend"] - port: 6001 - healthCheck: - path: /ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "2.5Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - averageUtilization: 80 - memory: {} - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Ingestor -# ============================================================================= -ingestor: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:ingestor"] - port: 3010 - healthCheck: - path: /o/ping - initialDelaySeconds: 30 - 
periodSeconds: 30 - timeoutSeconds: 120 - terminationGracePeriodSeconds: 120 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 12 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 75 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Aggregator -# ============================================================================= -aggregator: - enabled: true - replicaCount: 4 - command: ["npm", "run", "start:aggregator"] - port: 0 # No HTTP port exposed - healthCheck: {} # No HTTP health check (no port) - terminationGracePeriodSeconds: 60 - resources: - requests: - cpu: "1" - memory: "3.5Gi" - limits: - cpu: "2" - memory: "4Gi" - hpa: - enabled: true - minReplicas: 4 - maxReplicas: 8 - metrics: - cpu: - averageUtilization: 65 - memory: - averageUtilization: 65 - behavior: - scaleUp: - stabilizationWindowSeconds: 0 - policies: - - type: Percent - value: 100 - periodSeconds: 15 - scaleDown: - stabilizationWindowSeconds: 300 - policies: - - type: Percent - value: 10 - periodSeconds: 60 - pdb: - enabled: true - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Component: Job Server -# ============================================================================= -jobserver: - enabled: true - replicaCount: 1 - command: ["npm", "run", "start:jobserver"] - port: 3020 - 
healthCheck: - path: /o/ping - initialDelaySeconds: 30 - periodSeconds: 30 - timeoutSeconds: 5 - terminationGracePeriodSeconds: 30 - resources: - requests: - cpu: "1" - memory: "3Gi" - limits: - cpu: "1" - memory: "3Gi" - hpa: - enabled: true - minReplicas: 1 - maxReplicas: 1 - metrics: - cpu: - averageUtilization: 80 - memory: - averageUtilization: 85 - behavior: {} - pdb: - enabled: false - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: [] - antiAffinity: - enabled: false - extraEnv: [] - extraEnvFrom: [] - -# ============================================================================= -# Configuration (ConfigMaps) -# ============================================================================= -config: - # --- Common (shared by all components) --- - common: - NODE_ENV: production - COUNTLY_PLUGINS: "mobile,web,desktop,plugins,density,locale,browser,sources,views,logger,systemlogs,populator,reports,crashes,push,star-rating,slipping-away-users,compare,server-stats,dbviewer,crash_symbolication,crash-analytics,alerts,onboarding,consolidate,remote-config,hooks,dashboards,sdk,data-manager,guides,heatmaps,retention_segments,formulas,funnels,cohorts,ab-testing,performance-monitoring,config-transfer,data-migration,two-factor-auth,blocking,concurrent_users,revenue,activity-map,flows,surveys,event-timeline,drill,multi,active_users,ip-blocker,kafka,clickhouse" - COUNTLY_CONFIG__FILESTORAGE: gridfs - COUNTLY_CONFIG__DRILL_EVENTS_DRIVER: clickhouse - COUNTLY_CONFIG__SHARED_CONNECTION: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERPREFERENCE: '["clickhouse","mongodb"]' - COUNTLY_CONFIG__DATABASE_ADAPTERS_MONGODB_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_ADAPTERS_CLICKHOUSE_ENABLED: "true" - COUNTLY_CONFIG__DATABASE_FAILONCONNECTIONERROR: "true" - COUNTLY_CONFIG__EVENTSINK_SINKS: '["kafka"]' - COUNTLY_CONFIG__RELOADCONFIGAFTER: "10000" - - # --- API --- - api: - COUNTLY_CONTAINER: api - COUNTLY_CONFIG__API_PORT: "3001" - 
COUNTLY_CONFIG__API_HOST: "0.0.0.0" - COUNTLY_CONFIG__API_MAX_SOCKETS: "1024" - COUNTLY_CONFIG__API_MAX_UPLOAD_FILE_SIZE: "209715200" # 200 MiB - COUNTLY_CONFIG__API_TIMEOUT: "120000" # ms - - # --- Frontend --- - frontend: - COUNTLY_CONTAINER: frontend - COUNTLY_CONFIG__WEB_PORT: "6001" - COUNTLY_CONFIG__WEB_HOST: "0.0.0.0" - COUNTLY_CONFIG__WEB_SECURE_COOKIES: "false" - COUNTLY_CONFIG__COOKIE_MAXAGE: "86400000" # 24 hours in ms - - # --- Ingestor --- - ingestor: - COUNTLY_CONTAINER: ingestor - COUNTLY_CONFIG__INGESTOR_PORT: "3010" - COUNTLY_CONFIG__INGESTOR_HOST: "0.0.0.0" - - # --- Aggregator --- - aggregator: - COUNTLY_CONTAINER: aggregator - UV_THREADPOOL_SIZE: "6" - - # --- Job Server --- - jobserver: - COUNTLY_CONTAINER: jobserver - COUNTLY_CONFIG__JOBSERVER_PORT: "3020" - COUNTLY_CONFIG__JOBSERVER_HOST: "0.0.0.0" - - # --- ClickHouse Connection --- - clickhouse: - COUNTLY_CONFIG__CLICKHOUSE_QUERYOPTIONS_MAX_EXECUTION_TIME: "600" - COUNTLY_CONFIG__CLICKHOUSE_REQUEST_TIMEOUT: "1200000" # ms - COUNTLY_CONFIG__CLICKHOUSE_MAX_OPEN_CONNECTIONS: "10" - COUNTLY_CONFIG__CLICKHOUSE_APPLICATION: countly_drill - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_REQUEST: "false" - COUNTLY_CONFIG__CLICKHOUSE_COMPRESSION_RESPONSE: "false" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_ENABLED: "true" - COUNTLY_CONFIG__CLICKHOUSE_KEEP_ALIVE_IDLE_SOCKET_TTL: "10000" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_NAME: countly_cluster - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_SHARDS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_REPLICAS: "false" - COUNTLY_CONFIG__CLICKHOUSE_CLUSTER_ISCLOUD: "false" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_COORDINATORTYPE: keeper - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_ZKPATH: "/clickhouse/tables/{shard}/{database}/{table}" - COUNTLY_CONFIG__CLICKHOUSE_REPLICATION_REPLICANAME: "{replica}" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_ENABLED: "false" - COUNTLY_CONFIG__CLICKHOUSE_PARALLELREPLICAS_MAXPARALLELREPLICAS: "2" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_WRITETHROUGH: 
"true" - COUNTLY_CONFIG__CLICKHOUSE_DISTRIBUTED_INSERTDISTRIBUTEDSYNC: "true" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_NATIVEPORT: "9000" - COUNTLY_CONFIG__CLICKHOUSE_DICTIONARY_SECURE: "false" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_DAYSOLD: "30" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MIN: "60" - COUNTLY_CONFIG__CLICKHOUSE_IDENTITY_LIFETIME_MAX: "120" - - # --- Kafka Connection --- - kafka: - COUNTLY_CONFIG__KAFKA_ENABLED: "true" - COUNTLY_CONFIG__KAFKA_DRILLEVENTSTOPIC: drill-events - COUNTLY_CONFIG__KAFKA_CLUSTER_NAME: cly-kafka - COUNTLY_CONFIG__KAFKA_PARTITIONS: "100" - COUNTLY_CONFIG__KAFKA_REPLICATIONFACTOR: "2" - COUNTLY_CONFIG__KAFKA_RETENTIONMS: "604800000" # 7 days in ms - COUNTLY_CONFIG__KAFKA_ENABLETRANSACTIONS: "false" - COUNTLY_CONFIG__KAFKA_TRANSACTIONTIMEOUT: "60000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CLIENTID: countly-app - COUNTLY_CONFIG__KAFKA_RDKAFKA_REQUESTTIMEOUTMS: "20000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_CONNECTIONTIMEOUTMS: "8000" - COUNTLY_CONFIG__KAFKA_RDKAFKA_LINGERMS: "10" - COUNTLY_CONFIG__KAFKA_RDKAFKA_RETRIES: "5" - COUNTLY_CONFIG__KAFKA_RDKAFKA_ACKS: "-1" # -1 = all ISR replicas must acknowledge - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMINBYTES: "1024000" - COUNTLY_CONFIG__KAFKA_CONSUMER_FETCHMAXWAITMS: "1200" - COUNTLY_CONFIG__KAFKA_CONSUMER_SESSIONTIMEOUTMS: "120000" - COUNTLY_CONFIG__KAFKA_CONSUMER_HEARTBEATINTERVALMS: "20000" - COUNTLY_CONFIG__KAFKA_CONSUMER_AUTOOFFSETRESET: earliest - COUNTLY_CONFIG__KAFKA_CONSUMER_ENABLEAUTOCOMMIT: "false" - COUNTLY_CONFIG__KAFKA_CONSUMER_MAXPOLLINTERVALMS: "600000" - COUNTLY_CONFIG__KAFKA_CONNECTCONSUMERGROUPID: "connect-ch" - - # --- OpenTelemetry --- - otel: - OTEL_ENABLED: "false" - OTEL_EXPORTER_OTLP_ENDPOINT: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4318" - OTEL_EXPORTER_OTLP_PROTOCOL: "http/protobuf" - OTEL_TRACES_SAMPLER: "parentbased_traceidratio" - OTEL_TRACES_SAMPLER_ARG: "1.0" # 0.0-1.0, fraction of traces to sample - PYROSCOPE_ENABLED: "false" - -# 
--- Node.js Options (injected into configmap per component) --- -nodeOptions: - api: "--max-old-space-size=3072 --max-semi-space-size=256" - frontend: "--max-old-space-size=2048" - ingestor: "--max-old-space-size=2048 --max-semi-space-size=256" - aggregator: "--max-old-space-size=3072 --max-semi-space-size=128" - jobserver: "--max-old-space-size=2048 --max-semi-space-size=256" - -# ============================================================================= -# Backing Service Modes -# ============================================================================= -# When mode=external, the corresponding chart is not deployed and connection -# details below are used instead. - -backingServices: - mongodb: - mode: bundled # bundled | external - host: "" - port: "27017" - connectionString: "" # If set, used as-is (bypasses host/port/user/pass) - username: "app" - password: "" - database: "admin" - replicaSet: "" - existingSecret: "" - # --- External MongoDB Atlas example --- - # mode: external - # connectionString: "mongodb+srv://user:pass@cluster0.example.mongodb.net/admin?retryWrites=true&w=majority" - - clickhouse: - mode: bundled # bundled | external - host: "" - port: "8123" - tls: "false" - username: "default" - password: "" - database: "countly_drill" - existingSecret: "" - # --- External ClickHouse Cloud example --- - # mode: external - # host: "abc123.us-east-1.aws.clickhouse.cloud" - # port: "8443" - # tls: "true" - - kafka: - mode: bundled # bundled | external - brokers: "" # Comma-separated broker list - securityProtocol: "PLAINTEXT" # PLAINTEXT | SSL | SASL_PLAINTEXT | SASL_SSL - saslMechanism: "" - saslUsername: "" - saslPassword: "" - existingSecret: "" - # --- External Confluent Cloud example --- - # mode: external - # brokers: "pkc-12345.us-east-1.aws.confluent.cloud:9092" - # securityProtocol: "SASL_SSL" - # saslMechanism: "PLAIN" - -# ============================================================================= -# Secrets -# 
============================================================================= -secrets: - mode: values # values | existingSecret | externalSecret - keep: true # Retain secrets on helm uninstall - rotationId: "" # Change to force secret re-creation - - common: - existingSecret: "" - encryptionReportsKey: "" # REQUIRED: min 8 chars - webSessionSecret: "" # REQUIRED: min 8 chars - passwordSecret: "" # REQUIRED: min 8 chars - - clickhouse: - existingSecret: "" - username: "" - password: "" - database: "" - - kafka: - existingSecret: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - - mongodb: - existingSecret: "" - key: "connectionString.standard" # Key within the secret to read - password: "" # REQUIRED on first install (must match users.app.password in countly-mongodb) - - # --- ExternalSecret configuration (used only when mode=externalSecret) --- - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: "" - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: "" - webSessionSecret: "" - passwordSecret: "" - clickhouse: - url: "" - username: "" - password: "" - database: "" - kafka: - brokers: "" - securityProtocol: "" - saslMechanism: "" - saslUsername: "" - saslPassword: "" - mongodb: - connectionString: "" - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - ingressNamespaceSelector: - kubernetes.io/metadata.name: ingress-nginx - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-custom-namespace - # ports: - # - port: 3001 - # protocol: TCP - -# ============================================================================= -# Ingress -# ============================================================================= -ingress: - enabled: true - className: 
nginx - annotations: - # F5 NGINX Ingress Controller (OSS) annotations - nginx.org/client-max-body-size: "50m" - nginx.org/proxy-buffering: "True" - nginx.org/proxy-buffer-size: "256k" - nginx.org/proxy-buffers: "16 256k" - nginx.org/proxy-busy-buffers-size: "512k" - nginx.org/proxy-max-temp-file-size: "2048m" - nginx.org/client-body-buffer-size: "2m" - nginx.org/proxy-connect-timeout: "60s" - nginx.org/proxy-read-timeout: "120s" - nginx.org/proxy-send-timeout: "120s" - nginx.org/keepalive: "256" - nginx.org/server-snippets: | - otel_trace on; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-Port $server_port; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Scheme $scheme; - proxy_set_header Connection ""; - proxy_set_header X-Request-ID $request_id; - proxy_set_header X-Request-Start $msec; - # traceparent/tracestate now handled by ngx_otel_module (otel-trace-context: propagate) - client_header_timeout 30s; - nginx.org/location-snippets: | - proxy_request_buffering on; - proxy_next_upstream error timeout http_502 http_503 http_504; - proxy_next_upstream_timeout 30s; - proxy_next_upstream_tries 3; - proxy_temp_file_write_size 1m; - client_body_timeout 120s; - hostname: "" # Set via argocd/customers/.yaml - tls: - # TLS mode: letsencrypt | existingSecret | selfSigned | http - # http: No TLS - # letsencrypt: cert-manager + Let's Encrypt (recommended for production) - # existingSecret: Bring your own TLS secret - # selfSigned: cert-manager self-signed CA (for development) - mode: "" # Set via argocd/customers/.yaml - clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt - secretName: "" # Auto-derived if empty: -tls - selfSigned: - issuerName: "" # Auto-derived if empty: -ca-issuer - caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml index de1d4d8..919b3d8 100644 --- a/environments/gcr-argo/kafka.yaml +++ 
b/environments/gcr-argo/kafka.yaml @@ -1,13 +1,7 @@ -# ============================================================================= -# Kafka Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-kafka/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. -# --- Global Settings (inherited from helmfile globals) --- global: - imageRegistry: "" imageSource: mode: gcpArtifactRegistry gcpArtifactRegistry: @@ -22,329 +16,7 @@ global: key: customers-gcr-argo-gar-dockerconfig imagePullSecrets: - name: countly-registry - storageClass: "" - sizing: tier1 # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# --- Strimzi Operator API Version --- -strimzi: - apiVersion: kafka.strimzi.io/v1 - -# --- Kafka Version --- -version: "4.2.0" - -# ============================================================================= -# Brokers -# ============================================================================= -brokers: - replicas: 3 - resources: - requests: - cpu: "1" - memory: "4Gi" - limits: - cpu: "1" - memory: "4Gi" - jvmOptions: - xms: "2g" - xmx: "2g" - - # --- Persistence --- - persistence: - volumes: - - id: 0 - size: 100Gi - storageClass: "" - deleteClaim: false # Delete PVC when broker is removed - - # --- Broker Config --- - config: - default.replication.factor: 2 - min.insync.replicas: 2 - log.retention.hours: 168 # 7 days - log.segment.bytes: "1073741824" # 1 GiB - compression.type: lz4 - auto.create.topics.enable: false - offsets.topic.replication.factor: 2 - num.partitions: 24 - 
transaction.state.log.replication.factor: 2 - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Controllers (KRaft) -# ============================================================================= -controllers: - replicas: 3 - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - - persistence: - size: 20Gi - storageClass: "" - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - -# ============================================================================= -# Listeners -# ============================================================================= -listeners: - - name: internal - port: 9092 - type: internal - tls: false - -# ============================================================================= -# Cruise Control -# ============================================================================= -cruiseControl: - enabled: true - resources: - requests: - cpu: "1" - memory: "2Gi" - limits: - cpu: "1" - memory: "2Gi" - jvmOptions: - xms: "1g" - xmx: "2g" - autoRebalance: - - mode: add-brokers - - mode: remove-brokers - -# ============================================================================= -# Kafka Connect (ClickHouse Sink) -# ============================================================================= kafkaConnect: - enabled: true - name: connect-ch image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" - replicas: 2 - bootstrapServers: "" # Auto-derived from cluster if empty - - resources: - requests: - cpu: "2" - memory: 
"8Gi" - limits: - cpu: "2" - memory: "8Gi" - jvmOptions: - xms: "5g" - xmx: "5g" - - # --- Worker Configuration --- - workerConfig: - group.id: connect-ch - config.storage.topic: connect_ch_configs - offset.storage.topic: connect_ch_offsets - status.storage.topic: connect_ch_status - config.storage.replication.factor: 2 - offset.storage.replication.factor: 2 - status.storage.replication.factor: 2 - offset.storage.partitions: 25 - status.storage.partitions: 5 - key.converter: org.apache.kafka.connect.storage.StringConverter - value.converter: org.apache.kafka.connect.json.JsonConverter - value.converter.schemas.enable: "false" - connector.client.config.override.policy: All - config.providers: env - config.providers.env.class: org.apache.kafka.common.config.provider.EnvVarConfigProvider - - # --- ClickHouse Connection (for the sink connector) --- - clickhouse: - existingSecret: "" - secretName: clickhouse-auth - host: "" # Auto-derived from clickhouseNamespace if empty - port: "8123" - ssl: "false" - database: "countly_drill" - username: "default" - password: "" # REQUIRED: must match ClickHouse default user password - - # --- Environment Variables (injected into Connect pods) --- - env: - EXACTLY_ONCE: "false" - ERRORS_RETRY_TIMEOUT: "300" - ERRORS_TOLERANCE: "none" # none | all - CLICKHOUSE_SETTINGS: "input_format_binary_read_json_as_string=1,allow_experimental_json_type=1,enable_json_type=1,async_insert=1,wait_for_async_insert=1,async_insert_use_adaptive_busy_timeout=1,async_insert_busy_timeout_ms=10000,async_insert_max_data_size=268435456,async_insert_max_query_number=64,min_insert_block_size_rows=250000,min_insert_block_size_bytes=268435456,max_partitions_per_insert_block=500" - BYPASS_ROW_BINARY: "false" - TABLE_REFRESH_INTERVAL: "300" # seconds - KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter - VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter - VALUE_CONVERTER_SCHEMAS_ENABLE: "false" - KAFKA_CONSUMER_FETCH_MIN_BYTES: "33554432" # 
32 MiB - KAFKA_CONSUMER_FETCH_MAX_WAIT_MS: "60000" - KAFKA_CONSUMER_MAX_POLL_RECORDS: "250000" - KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES: "134217728" # 128 MiB - KAFKA_CONSUMER_FETCH_MAX_BYTES: "536870912" # 512 MiB - KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS: "900000" - KAFKA_CONSUMER_SESSION_TIMEOUT_MS: "45000" - KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS: "15000" - KAFKA_CONSUMER_REQUEST_TIMEOUT_MS: "120000" - - # --- HPA --- - hpa: - enabled: false - minReplicas: 1 - maxReplicas: 3 - metrics: - cpu: - averageUtilization: 70 - memory: - averageUtilization: 80 - behavior: - scaleUp: - stabilizationWindowSeconds: 120 - policies: - - type: Percent - value: 50 - periodSeconds: 120 - - type: Pods - value: 2 - periodSeconds: 120 - selectPolicy: Min - scaleDown: - stabilizationWindowSeconds: 600 - policies: - - type: Percent - value: 25 - periodSeconds: 300 - - type: Pods - value: 1 - periodSeconds: 300 - selectPolicy: Min - - # --- OpenTelemetry Java Agent --- - # Baked into the Docker image at /opt/otel/opentelemetry-javaagent.jar. - # When enabled, JAVA_TOOL_OPTIONS activates it for Kafka consumer/producer - # and outbound HTTP (ClickHouse sink) span creation. - otel: - enabled: false - serviceName: "kafka-connect" - exporterEndpoint: "http://countly-observability-alloy-otlp.observability.svc.cluster.local:4317" - exporterProtocol: "grpc" - sampler: "parentbased_traceidratio" - samplerArg: "1.0" - resourceAttributes: "" # e.g. 
"deployment.environment=production,k8s.cluster.name=my-cluster" - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- Connectors --- - connectors: - - name: ch-sink-drill-events - enabled: true - state: running # running | paused | stopped - class: com.clickhouse.kafka.connect.ClickHouseSinkConnector - tasksMax: 1 - autoRestart: - enabled: true - maxRestarts: 10 - config: - topics: drill-events - topic2TableMap: "drill-events=drill_events" - hostname: "${env:CLICKHOUSE_HOST}" - port: "${env:CLICKHOUSE_PORT}" - ssl: "${env:CLICKHOUSE_SSL}" - database: "${env:CLICKHOUSE_DB}" - username: "${env:CLICKHOUSE_USER}" - password: "${env:CLICKHOUSE_PASSWORD}" - exactlyOnce: "${env:EXACTLY_ONCE}" - errors.retry.timeout: "${env:ERRORS_RETRY_TIMEOUT}" - errors.tolerance: "${env:ERRORS_TOLERANCE}" - clickhouseSettings: "${env:CLICKHOUSE_SETTINGS}" - bypassRowBinary: "${env:BYPASS_ROW_BINARY}" - tableRefreshInterval: "${env:TABLE_REFRESH_INTERVAL}" - key.converter: "${env:KEY_CONVERTER}" - value.converter: "${env:VALUE_CONVERTER}" - value.converter.schemas.enable: "${env:VALUE_CONVERTER_SCHEMAS_ENABLE}" - consumer.override.fetch.min.bytes: "${env:KAFKA_CONSUMER_FETCH_MIN_BYTES}" - consumer.override.fetch.max.wait.ms: "${env:KAFKA_CONSUMER_FETCH_MAX_WAIT_MS}" - consumer.override.max.poll.records: "${env:KAFKA_CONSUMER_MAX_POLL_RECORDS}" - consumer.override.max.partition.fetch.bytes: "${env:KAFKA_CONSUMER_MAX_PARTITION_FETCH_BYTES}" - consumer.override.fetch.max.bytes: "${env:KAFKA_CONSUMER_FETCH_MAX_BYTES}" - consumer.override.request.timeout.ms: "${env:KAFKA_CONSUMER_REQUEST_TIMEOUT_MS}" - consumer.override.max.poll.interval.ms: "${env:KAFKA_CONSUMER_MAX_POLL_INTERVAL_MS}" - consumer.override.session.timeout.ms: "${env:KAFKA_CONSUMER_SESSION_TIMEOUT_MS}" - consumer.override.heartbeat.interval.ms: 
"${env:KAFKA_CONSUMER_HEARTBEAT_INTERVAL_MS}" - connection.timeout: "60" - socket.timeout: "30000" - retry.count: "3" - connection.pool.size: "10" - healthcheck.enabled: "true" - healthcheck.interval: "10000" - dlq: {} # Dead-letter queue config (empty = disabled) - -# ============================================================================= -# Metrics -# ============================================================================= -metrics: - enabled: true - -# --- Cross-Namespace Reference --- -clickhouseNamespace: clickhouse - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 9092 - # protocol: TCP - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets on helm uninstall diff --git a/environments/gcr-argo/migration.yaml b/environments/gcr-argo/migration.yaml index 6fa760c..7cb4b34 100644 --- a/environments/gcr-argo/migration.yaml +++ b/environments/gcr-argo/migration.yaml @@ -1,3 +1,2 @@ -# Migration overrides for optional countly-migration app. -# Enable per customer by setting `migration: enabled` in argocd/customers/.yaml -# and then filling this file with environment-specific overrides as needed. +# Customer-specific migration overrides only. +# This deployment uses the disabled migration profile. 
diff --git a/environments/gcr-argo/mongodb.yaml b/environments/gcr-argo/mongodb.yaml index 31f230e..ebe28cc 100644 --- a/environments/gcr-argo/mongodb.yaml +++ b/environments/gcr-argo/mongodb.yaml @@ -1,144 +1,2 @@ -# ============================================================================= -# MongoDB Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-mongodb/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - sizing: small # local | small | production - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -createNamespace: false - -# ============================================================================= -# MongoDB Server -# ============================================================================= -mongodb: - version: "8.2.5" - members: 2 # Replica set member count - - resources: - requests: - cpu: "500m" - memory: "2Gi" - limits: - cpu: "2" - memory: "8Gi" - - persistence: - storageClass: "" # Overrides global.storageClass for MongoDB PVCs - size: 100Gi - - # --- Scheduling --- - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - antiAffinity: - enabled: true - type: preferred # preferred | required - topologyKey: kubernetes.io/hostname - weight: 100 - - # --- TLS --- - tls: - enabled: false - -# ============================================================================= -# Users -# ============================================================================= -users: - # --- Application User --- - app: - name: app - database: admin - roles: - - name: readWriteAnyDatabase - db: admin - - name: dbAdmin - db: 
admin - passwordSecretName: app-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - - # --- Metrics Exporter User --- - metrics: - enabled: true - name: metrics - database: admin - roles: - - name: clusterMonitor - db: admin - - name: read - db: local - passwordSecretName: metrics-user-password - passwordSecretKey: password - password: "" # REQUIRED on first install - -# ============================================================================= -# Prometheus Exporter -# ============================================================================= -exporter: - enabled: true - image: percona/mongodb_exporter:0.40.0 - port: 9216 - resources: - requests: - cpu: "50m" - memory: "64Mi" - limits: - cpu: "200m" - memory: "256Mi" - service: - enabled: true - args: - - --collect-all - - --collector.diagnosticdata - - --collector.replicasetstatus - - --collector.dbstats - - --collector.topmetrics - - --collector.indexstats - - --collector.collstats - -# ============================================================================= -# Pod Disruption Budget -# ============================================================================= -podDisruptionBudget: - enabled: false - maxUnavailable: 1 - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - allowedNamespaces: - - countly - allowMonitoring: false - monitoringNamespaceSelector: - kubernetes.io/metadata.name: monitoring - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 27017 - # protocol: TCP - -# ============================================================================= -# Secrets -# ============================================================================= -secrets: - keep: true # Retain secrets 
on helm uninstall +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/gcr-argo/observability.yaml b/environments/gcr-argo/observability.yaml index 79a4b39..56e78bb 100644 --- a/environments/gcr-argo/observability.yaml +++ b/environments/gcr-argo/observability.yaml @@ -1,437 +1,2 @@ -# ============================================================================= -# Observability Chart — Comprehensive Reference Configuration -# ============================================================================= -# Every configurable key from charts/countly-observability/values.yaml is listed here. -# Override only what differs from the profile defaults in your environment. -# ============================================================================= - -# --- Deployment Mode --- -# full — All backends + Grafana in-cluster -# hybrid — All backends in-cluster, no Grafana (use external Grafana) -# external — Collectors only, forward to external endpoints -# disabled — Set in global.yaml observability.mode to skip this chart entirely -mode: full - -# --- Cluster Name (injected into Prometheus external_labels) --- -clusterName: countly-local - -# --- Cross-Namespace References --- -countlyNamespace: countly -clickhouseNamespace: clickhouse -mongodbNamespace: mongodb -kafkaNamespace: kafka -ingressNamespace: ingress-nginx -certManagerNamespace: cert-manager -clickhouseOperatorNamespace: clickhouse-operator-system - -# --- NGINX Ingress Controller Scrape Configuration --- -nginxIngress: - podLabelName: "nginx-ingress" # F5 NGINX IC = "nginx-ingress", community = "ingress-nginx" - metricsPort: "9113" - -# --- Global Settings (inherited from helmfile globals) --- -global: - imageRegistry: "" - imagePullSecrets: [] - storageClass: "" - scheduling: - nodeSelector: {} - tolerations: [] - -nameOverride: "" -fullnameOverride: "" - -# 
============================================================================= -# Per-Signal Configuration -# ============================================================================= - -# --- Metrics --- -metrics: - enabled: true - sampling: - interval: "15s" # Global Prometheus scrape_interval - -# --- Traces --- -traces: - enabled: true - sampling: - strategy: "AlwaysOn" # AlwaysOn | TraceIdRatio | ParentBased | TailBased - ratio: 1.0 # 0.0-1.0, used with TraceIdRatio or ParentBased - tailSampling: # Only used when strategy == TailBased - waitDuration: "10s" - numTraces: 50000 - policies: - keepErrors: true - latencyThresholdMs: 2000 - baselineRatio: 0.1 - -# --- Logs --- -logs: - enabled: true - sampling: - enabled: false - dropRate: 0 # 0.0-1.0, fraction of logs to drop - -# --- Profiling --- -profiling: - enabled: true - sampling: - rate: "100" # Advisory — used in NOTES.txt for SDK config - -# ============================================================================= -# Prometheus -# ============================================================================= -prometheus: - image: - repository: prom/prometheus - tag: "v3.10.0" - retention: - time: "30d" - size: "50GB" - storage: - size: 100Gi - storageClass: "" - resources: - requests: - cpu: "2" - memory: "3Gi" - limits: - cpu: "2" - memory: "4Gi" - extraArgs: [] - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - remoteWriteUrl: "" # Full Prometheus remote write URL (e.g. 
https://prom.corp.com/api/v1/write) - -# ============================================================================= -# Loki -# ============================================================================= -loki: - image: - repository: grafana/loki - tag: "3.6.7" - retention: "30d" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure - size: 100Gi - storageClass: "" - # Object storage settings (only used when backend != filesystem) - bucket: "" # Bucket/container name (REQUIRED for object backends) - endpoint: "" # Custom endpoint (e.g. MinIO: http://minio:9000) - region: "" # Cloud region (S3) - insecure: false # Use HTTP instead of HTTPS - forcePathStyle: false # S3 path-style access (required for MinIO) - # Credential file secret (for GCS JSON key files) - existingSecret: "" # K8s Secret name to mount - secretKey: "key.json" # Key within the Secret - secretMountPath: "/var/secrets/storage" - # Env-based credentials (for AWS access keys, Azure account keys) - envFromSecret: "" # K8s Secret name to inject as env vars - # Provider-specific passthrough (rendered directly into provider block) - config: {} - # - # --- Object storage examples (apply to loki, tempo, and pyroscope) --- - # - # --- AWS S3 example --- - # backend: s3 - # s3: - # bucket: my-loki-data - # region: us-east-1 - # endpoint: "" - # insecure: false - # forcePathStyle: false - # credentials: - # source: envFromSecret - # envFromSecret: loki-s3-credentials # Must contain AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY - # - # --- GCS example --- - # backend: gcs - # gcs: - # bucket: my-loki-data - # credentials: - # source: existingSecret - # existingSecret: loki-gcs-key - # secretKey: "key.json" - # - # --- MinIO example --- - # backend: s3 - # s3: - # bucket: loki - # endpoint: minio.storage.svc.cluster.local:9000 - # insecure: true - # forcePathStyle: true - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - config: - maxStreamsPerUser: 30000 - 
maxLineSize: 256000 - ingestionRateMb: 64 - ingestionBurstSizeMb: 128 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - pushUrl: "" # Full Loki push URL (e.g. https://loki.corp.com/loki/api/v1/push) - -# ============================================================================= -# Tempo -# ============================================================================= -tempo: - image: - repository: grafana/tempo - tag: "2.10.1" - retention: "12h" - storage: - backend: "local" # local | s3 | gcs | azure - size: 150Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "3" - memory: "6Gi" - limits: - cpu: "4" - memory: "10Gi" - config: - ingestionRateLimitBytes: 100000000 - ingestionBurstSizeBytes: 150000000 - maxTracesPerUser: 50000 - maxBytesPerTrace: 5000000 - maxRecvMsgSizeMiB: 16 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - otlpGrpcEndpoint: "" # Tempo OTLP gRPC host:port (e.g. 
tempo.corp.com:4317) - otlpHttpEndpoint: "" # Tempo OTLP HTTP URL (optional fallback) - -# ============================================================================= -# Pyroscope -# ============================================================================= -pyroscope: - image: - repository: grafana/pyroscope - tag: "1.18.1" - retention: "72h" - storage: - backend: "filesystem" # filesystem | s3 | gcs | azure | swift - size: 20Gi - storageClass: "" - bucket: "" - endpoint: "" - region: "" - insecure: false - forcePathStyle: false - existingSecret: "" - secretKey: "key.json" - secretMountPath: "/var/secrets/storage" - envFromSecret: "" - config: {} - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "1" - memory: "2Gi" - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - ingestUrl: "" # Pyroscope ingest URL (e.g. https://pyroscope.corp.com) - -# ============================================================================= -# Grafana -# ============================================================================= -grafana: - enabled: true # Only deployed when mode == "full" - image: - repository: grafana/grafana - tag: "12.4.0" - admin: - existingSecret: "" # Use an existing Secret for admin credentials - userKey: "admin-user" - passwordKey: "admin-password" - persistence: - enabled: false # Ephemeral by default (declarative config, no state to lose) - size: 10Gi - storageClass: "" - resources: - requests: - cpu: "1" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - plugins: - install: "grafana-pyroscope-datasource" - featureToggles: "tempoSearch,tempoBackendSearch,traceqlEditor,exploreTraces" - dashboards: - enabled: true - overview: true - platform: true - countly: true - data: true - edge: true - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - affinity: {} - external: - url: "" # External Grafana URL (for NOTES.txt only) - -# 
============================================================================= -# Alloy (DaemonSet — log collection) -# ============================================================================= -alloy: - image: - repository: grafana/alloy - tag: "v1.13.2" - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - scheduling: - nodeSelector: - kubernetes.io/os: linux - tolerations: [] - -# ============================================================================= -# Alloy-OTLP (Deployment — OTLP traces + profiling receive) -# ============================================================================= -alloyOtlp: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2" - memory: "2Gi" - memoryLimiter: - limit: "1600MiB" # Must be < resources.limits.memory - spikeLimit: "400MiB" - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# Alloy-Metrics (Deployment — ALL Prometheus scraping) -# ============================================================================= -alloyMetrics: - image: - repository: grafana/alloy - tag: "v1.13.2" - replicas: 1 - resources: - requests: - cpu: "500m" - memory: "512Mi" - limits: - cpu: "500m" - memory: "512Mi" - pdb: - enabled: false - minAvailable: 1 - scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# kube-state-metrics -# ============================================================================= -kubeStateMetrics: - enabled: true - image: - repository: registry.k8s.io/kube-state-metrics/kube-state-metrics - tag: "v2.18.0" - resources: - requests: - cpu: "10m" - memory: "32Mi" - limits: - cpu: "100m" - memory: "256Mi" - namespaces: - - countly - - observability - - ingress-nginx - - kube-system - - clickhouse - - mongodb - - kafka - 
scheduling: - nodeSelector: {} - tolerations: [] - -# ============================================================================= -# node-exporter -# ============================================================================= -nodeExporter: - enabled: true - image: - repository: prom/node-exporter - tag: "v1.10.2" - resources: - requests: - cpu: "100m" - memory: "180Mi" - limits: - cpu: "250m" - memory: "300Mi" - -# ============================================================================= -# Ingress (for Grafana) -# ============================================================================= -ingress: - enabled: false - className: nginx - annotations: {} - hosts: - - host: obs.example.com - tls: [] - -# ============================================================================= -# Network Policy -# ============================================================================= -networkPolicy: - enabled: false - additionalIngress: [] - # additionalIngress: - # - from: - # - namespaceSelector: - # matchLabels: - # kubernetes.io/metadata.name: my-app-namespace - # ports: - # - port: 4318 - # protocol: TCP +# Customer-specific observability overrides only. +# This deployment uses the disabled observability profile. diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index bb43d57..034184f 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -94,6 +94,34 @@ backingServices: mode: bundled EOF +cat > "${env_dir}/countly.yaml" <<'EOF' +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. +EOF + +cat > "${env_dir}/kafka.yaml" <<'EOF' +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. +EOF + +cat > "${env_dir}/clickhouse.yaml" <<'EOF' +# Customer-specific ClickHouse overrides only. 
+# Leave this file minimal so sizing / security profiles apply cleanly. +EOF + +cat > "${env_dir}/mongodb.yaml" <<'EOF' +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. +EOF + +cat > "${env_dir}/observability.yaml" <<'EOF' +# Customer-specific observability overrides only. +EOF + +cat > "${env_dir}/migration.yaml" <<'EOF' +# Customer-specific migration overrides only. +EOF + cat > "${customer_file}" < Date: Tue, 31 Mar 2026 22:40:21 +0530 Subject: [PATCH 37/79] Ignore benign Argo drift for ESO, ingress, and Keeper --- argocd/applicationsets/01-clickhouse.yaml | 2 ++ argocd/applicationsets/02-kafka.yaml | 7 +++++++ argocd/applicationsets/03-countly.yaml | 7 +++++++ argocd/operators/05-nginx-ingress.yaml | 2 ++ 4 files changed, 18 insertions(+) diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml index 2126c0c..a4443ab 100644 --- a/argocd/applicationsets/01-clickhouse.yaml +++ b/argocd/applicationsets/01-clickhouse.yaml @@ -65,3 +65,5 @@ spec: kind: KeeperCluster jsonPointers: - /status + - /spec/containerTemplate/resources/requests/memory + - /spec/containerTemplate/resources/limits/memory diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index 9b2d6d4..a299697 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -79,3 +79,10 @@ spec: kind: KafkaNodePool jsonPointers: - /status + - group: external-secrets.io + kind: ExternalSecret + name: countly-registry + jsonPointers: + - /spec/data/0/remoteRef/conversionStrategy + - /spec/data/0/remoteRef/decodingStrategy + - /spec/data/0/remoteRef/metadataPolicy diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index bf6b728..fd23824 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -73,3 +73,10 @@ spec: kind: Ingress jsonPointers: - /status + 
- group: external-secrets.io + kind: ExternalSecret + name: countly-registry + jsonPointers: + - /spec/data/0/remoteRef/conversionStrategy + - /spec/data/0/remoteRef/decodingStrategy + - /spec/data/0/remoteRef/metadataPolicy diff --git a/argocd/operators/05-nginx-ingress.yaml b/argocd/operators/05-nginx-ingress.yaml index 2b43ab0..3d7b348 100644 --- a/argocd/operators/05-nginx-ingress.yaml +++ b/argocd/operators/05-nginx-ingress.yaml @@ -75,3 +75,5 @@ spec: jsonPointers: - /metadata/annotations/cloud.google.com~1neg - /spec/healthCheckNodePort + - /spec/ports/0/nodePort + - /spec/ports/1/nodePort From 7d1bf3f284ebd857a4829037d9d8368c92b76d1f Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 00:20:54 +0530 Subject: [PATCH 38/79] Add Secret Manager-backed app secrets via External Secrets --- README.md | 2 + argocd/applicationsets/00-mongodb.yaml | 6 ++ argocd/applicationsets/01-clickhouse.yaml | 6 ++ argocd/applicationsets/02-kafka.yaml | 9 +- argocd/applicationsets/03-countly.yaml | 9 +- .../external-secret-default-password.yaml | 22 +++++ .../templates/secret-default-password.yaml | 2 +- charts/countly-clickhouse/values.schema.json | 25 +++++- charts/countly-clickhouse/values.yaml | 8 ++ .../external-secret-clickhouse-connect.yaml | 25 ++++++ .../templates/secret-clickhouse-connect.yaml | 2 +- charts/countly-kafka/values.schema.json | 31 ++++++- charts/countly-kafka/values.yaml | 10 +++ .../templates/external-secret.yaml | 2 +- .../templates/secret-passwords.yaml | 54 +++++++++++- charts/countly-mongodb/values.schema.json | 36 +++++++- charts/countly-mongodb/values.yaml | 11 +++ .../templates/external-secret-clickhouse.yaml | 2 +- .../templates/external-secret-common.yaml | 2 +- .../templates/external-secret-kafka.yaml | 2 +- .../templates/external-secret-mongodb.yaml | 2 +- environments/reference/README.md | 10 +-- .../reference/external-secrets.example.yaml | 85 ++++++++++++++----- .../reference/secrets-clickhouse.yaml | 14 +++ 
environments/reference/secrets-countly.yaml | 24 ++++++ environments/reference/secrets-kafka.yaml | 16 ++++ environments/reference/secrets-mongodb.yaml | 17 ++++ environments/reference/secrets.example.yaml | 17 ++++ 28 files changed, 400 insertions(+), 51 deletions(-) create mode 100644 charts/countly-clickhouse/templates/external-secret-default-password.yaml create mode 100644 charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml diff --git a/README.md b/README.md index 84e64ca..cc60422 100644 --- a/README.md +++ b/README.md @@ -160,6 +160,7 @@ Install required operators before deploying Countly. See [docs/PREREQUISITES.md] - Set `global.imagePullSecrets` when pulling from a private registry such as GAR 3. **Fill in required secrets** in the chart-specific files. See `environments/reference/secrets.example.yaml` for a complete reference. + Keep `secrets.mode: values` for direct YAML values, switch to `secrets.mode: externalSecret` to have the charts create `ExternalSecret` resources backed by your Secret Manager store. 4. **Register your environment** in `helmfile.yaml.gotmpl`: ```yaml @@ -177,6 +178,7 @@ Install required operators before deploying Countly. See [docs/PREREQUISITES.md] For a GAR-backed production example, see [environments/example-production/global.yaml](/Users/admin/cly/helm/environments/example-production/global.yaml) and replace `countly-gar` with your Kubernetes docker-registry secret name. For GitOps-managed pull secrets, start from [environments/reference/image-pull-secrets.example.yaml](/Users/admin/cly/helm/environments/reference/image-pull-secrets.example.yaml) and encrypt or template it before committing. For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly and Kafka Connect each create their own namespaced `dockerconfigjson` pull secret. 
+Application secrets can use the same pattern in `secrets-countly.yaml`, `secrets-kafka.yaml`, `secrets-clickhouse.yaml`, and `secrets-mongodb.yaml` by switching `secrets.mode` to `externalSecret` and filling `secrets.externalSecret.remoteRefs`. ### GitOps Customer Onboarding diff --git a/argocd/applicationsets/00-mongodb.yaml b/argocd/applicationsets/00-mongodb.yaml index c780cf1..b2e4e10 100644 --- a/argocd/applicationsets/00-mongodb.yaml +++ b/argocd/applicationsets/00-mongodb.yaml @@ -61,3 +61,9 @@ spec: kind: MongoDBCommunity jsonPointers: - /status + - group: external-secrets.io + kind: ExternalSecret + jqPathExpressions: + - .spec.data[]?.remoteRef.conversionStrategy + - .spec.data[]?.remoteRef.decodingStrategy + - .spec.data[]?.remoteRef.metadataPolicy diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml index a4443ab..655b96b 100644 --- a/argocd/applicationsets/01-clickhouse.yaml +++ b/argocd/applicationsets/01-clickhouse.yaml @@ -67,3 +67,9 @@ spec: - /status - /spec/containerTemplate/resources/requests/memory - /spec/containerTemplate/resources/limits/memory + - group: external-secrets.io + kind: ExternalSecret + jqPathExpressions: + - .spec.data[]?.remoteRef.conversionStrategy + - .spec.data[]?.remoteRef.decodingStrategy + - .spec.data[]?.remoteRef.metadataPolicy diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index a299697..6809a49 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -81,8 +81,7 @@ spec: - /status - group: external-secrets.io kind: ExternalSecret - name: countly-registry - jsonPointers: - - /spec/data/0/remoteRef/conversionStrategy - - /spec/data/0/remoteRef/decodingStrategy - - /spec/data/0/remoteRef/metadataPolicy + jqPathExpressions: + - .spec.data[]?.remoteRef.conversionStrategy + - .spec.data[]?.remoteRef.decodingStrategy + - .spec.data[]?.remoteRef.metadataPolicy diff --git 
a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index fd23824..e90ee9f 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -75,8 +75,7 @@ spec: - /status - group: external-secrets.io kind: ExternalSecret - name: countly-registry - jsonPointers: - - /spec/data/0/remoteRef/conversionStrategy - - /spec/data/0/remoteRef/decodingStrategy - - /spec/data/0/remoteRef/metadataPolicy + jqPathExpressions: + - .spec.data[]?.remoteRef.conversionStrategy + - .spec.data[]?.remoteRef.decodingStrategy + - .spec.data[]?.remoteRef.metadataPolicy diff --git a/charts/countly-clickhouse/templates/external-secret-default-password.yaml b/charts/countly-clickhouse/templates/external-secret-default-password.yaml new file mode 100644 index 0000000..f234c51 --- /dev/null +++ b/charts/countly-clickhouse/templates/external-secret-default-password.yaml @@ -0,0 +1,22 @@ +{{- if and (eq (.Values.secrets.mode | default "values") "externalSecret") (not .Values.auth.defaultUserPassword.existingSecret) }} +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: {{ .Values.auth.defaultUserPassword.secretName }} + labels: + {{- include "countly-clickhouse.labels" . | nindent 4 }} + annotations: + {{- include "countly-clickhouse.syncWave" (dict "wave" "0" "root" .) 
| nindent 4 }} +spec: + refreshInterval: {{ .Values.secrets.externalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "secrets.externalSecret.secretStoreRef.name is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.secretStoreRef.name }} + kind: {{ .Values.secrets.externalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ .Values.auth.defaultUserPassword.secretName }} + creationPolicy: Owner + data: + - secretKey: {{ .Values.auth.defaultUserPassword.key }} + remoteRef: + key: {{ required "secrets.externalSecret.remoteRefs.defaultUserPassword is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.defaultUserPassword }} +{{- end }} diff --git a/charts/countly-clickhouse/templates/secret-default-password.yaml b/charts/countly-clickhouse/templates/secret-default-password.yaml index cbeaa37..87cf770 100644 --- a/charts/countly-clickhouse/templates/secret-default-password.yaml +++ b/charts/countly-clickhouse/templates/secret-default-password.yaml @@ -1,4 +1,4 @@ -{{- if not .Values.auth.defaultUserPassword.existingSecret }} +{{- if and (ne (.Values.secrets.mode | default "values") "externalSecret") (not .Values.auth.defaultUserPassword.existingSecret) }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/countly-clickhouse/values.schema.json b/charts/countly-clickhouse/values.schema.json index 3bb5f65..404976d 100644 --- a/charts/countly-clickhouse/values.schema.json +++ b/charts/countly-clickhouse/values.schema.json @@ -143,7 +143,30 @@ "secrets": { "type": "object", "properties": { - "keep": { "type": "boolean" } + "keep": { "type": "boolean" }, + "mode": { + "type": "string", + "enum": ["values", "existingSecret", "externalSecret"] + }, + "externalSecret": { + "type": "object", + "properties": { + "refreshInterval": { "type": "string" }, + "secretStoreRef": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "kind": { 
"type": "string" } + } + }, + "remoteRefs": { + "type": "object", + "properties": { + "defaultUserPassword": { "type": "string" } + } + } + } + } } } } diff --git a/charts/countly-clickhouse/values.yaml b/charts/countly-clickhouse/values.yaml index 2e77eb2..53316a3 100644 --- a/charts/countly-clickhouse/values.yaml +++ b/charts/countly-clickhouse/values.yaml @@ -144,3 +144,11 @@ serviceMonitor: secrets: keep: true + mode: values # values | existingSecret | externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: "" diff --git a/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml b/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml new file mode 100644 index 0000000..f6e17a5 --- /dev/null +++ b/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.kafkaConnect.enabled (eq (.Values.secrets.mode | default "values") "externalSecret") (not .Values.kafkaConnect.clickhouse.existingSecret) }} +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: {{ .Values.kafkaConnect.clickhouse.secretName }} + labels: + {{- include "countly-kafka.labels" . | nindent 4 }} + annotations: + {{- include "countly-kafka.syncWave" (dict "wave" "0" "root" .) 
| nindent 4 }} +spec: + refreshInterval: {{ .Values.secrets.externalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "secrets.externalSecret.secretStoreRef.name is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.secretStoreRef.name }} + kind: {{ .Values.secrets.externalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ .Values.kafkaConnect.clickhouse.secretName }} + creationPolicy: Owner + data: + - secretKey: username + remoteRef: + key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.username is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} + - secretKey: password + remoteRef: + key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.password is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.clickhouse.password }} +{{- end }} diff --git a/charts/countly-kafka/templates/secret-clickhouse-connect.yaml b/charts/countly-kafka/templates/secret-clickhouse-connect.yaml index 6039fca..54f2090 100644 --- a/charts/countly-kafka/templates/secret-clickhouse-connect.yaml +++ b/charts/countly-kafka/templates/secret-clickhouse-connect.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.kafkaConnect.enabled (not .Values.kafkaConnect.clickhouse.existingSecret) }} +{{- if and .Values.kafkaConnect.enabled (ne (.Values.secrets.mode | default "values") "externalSecret") (not .Values.kafkaConnect.clickhouse.existingSecret) }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/countly-kafka/values.schema.json b/charts/countly-kafka/values.schema.json index 1e7d7da..87f27e0 100644 --- a/charts/countly-kafka/values.schema.json +++ b/charts/countly-kafka/values.schema.json @@ -176,7 +176,36 @@ "secrets": { "type": "object", "properties": { - "keep": { "type": "boolean" } + "keep": { "type": "boolean" }, + "mode": { + "type": "string", + "enum": ["values", "existingSecret", "externalSecret"] + 
}, + "externalSecret": { + "type": "object", + "properties": { + "refreshInterval": { "type": "string" }, + "secretStoreRef": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "kind": { "type": "string" } + } + }, + "remoteRefs": { + "type": "object", + "properties": { + "clickhouse": { + "type": "object", + "properties": { + "username": { "type": "string" }, + "password": { "type": "string" } + } + } + } + } + } + } } } } diff --git a/charts/countly-kafka/values.yaml b/charts/countly-kafka/values.yaml index 3fe31f9..6e54a9d 100644 --- a/charts/countly-kafka/values.yaml +++ b/charts/countly-kafka/values.yaml @@ -283,3 +283,13 @@ networkPolicy: secrets: keep: true + mode: values # values | existingSecret | externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + clickhouse: + username: "" + password: "" diff --git a/charts/countly-migration/templates/external-secret.yaml b/charts/countly-migration/templates/external-secret.yaml index aec8814..3871333 100644 --- a/charts/countly-migration/templates/external-secret.yaml +++ b/charts/countly-migration/templates/external-secret.yaml @@ -1,5 +1,5 @@ {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: {{ include "countly-migration.fullname" . 
}} diff --git a/charts/countly-mongodb/templates/secret-passwords.yaml b/charts/countly-mongodb/templates/secret-passwords.yaml index 1200229..368692b 100644 --- a/charts/countly-mongodb/templates/secret-passwords.yaml +++ b/charts/countly-mongodb/templates/secret-passwords.yaml @@ -1,3 +1,4 @@ +{{- if eq (.Values.secrets.mode | default "values") "values" }} apiVersion: v1 kind: Secret metadata: @@ -41,7 +42,54 @@ data: {{ index $existing.data .Values.users.metrics.passwordSecretKey }} {{- else if .Values.users.metrics.password }} {{ .Values.users.metrics.password | b64enc }} - {{- else }} - {{- fail "MongoDB metrics user password is required on first install. Set users.metrics.password." }} - {{- end }} +{{- else }} +{{- fail "MongoDB metrics user password is required on first install. Set users.metrics.password." }} +{{- end }} +{{- end }} +{{- end }} +{{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} +--- +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: {{ .Values.users.app.passwordSecretName }} + labels: + {{- include "countly-mongodb.labels" . | nindent 4 }} + annotations: + {{- include "countly-mongodb.syncWave" (dict "wave" "0" "root" .) 
| nindent 4 }} +spec: + refreshInterval: {{ .Values.secrets.externalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "secrets.externalSecret.secretStoreRef.name is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.secretStoreRef.name }} + kind: {{ .Values.secrets.externalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ .Values.users.app.passwordSecretName }} + creationPolicy: Owner + data: + - secretKey: {{ .Values.users.app.passwordSecretKey }} + remoteRef: + key: {{ required "secrets.externalSecret.remoteRefs.app.password is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.app.password }} +{{- if .Values.users.metrics.enabled }} +--- +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: {{ .Values.users.metrics.passwordSecretName }} + labels: + {{- include "countly-mongodb.labels" . | nindent 4 }} + annotations: + {{- include "countly-mongodb.syncWave" (dict "wave" "0" "root" .) 
| nindent 4 }} +spec: + refreshInterval: {{ .Values.secrets.externalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "secrets.externalSecret.secretStoreRef.name is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.secretStoreRef.name }} + kind: {{ .Values.secrets.externalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ .Values.users.metrics.passwordSecretName }} + creationPolicy: Owner + data: + - secretKey: {{ .Values.users.metrics.passwordSecretKey }} + remoteRef: + key: {{ required "secrets.externalSecret.remoteRefs.metrics.password is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.metrics.password }} +{{- end }} {{- end }} diff --git a/charts/countly-mongodb/values.schema.json b/charts/countly-mongodb/values.schema.json index 4276610..420fa71 100644 --- a/charts/countly-mongodb/values.schema.json +++ b/charts/countly-mongodb/values.schema.json @@ -69,7 +69,41 @@ "secrets": { "type": "object", "properties": { - "keep": { "type": "boolean" } + "keep": { "type": "boolean" }, + "mode": { + "type": "string", + "enum": ["values", "existingSecret", "externalSecret"] + }, + "externalSecret": { + "type": "object", + "properties": { + "refreshInterval": { "type": "string" }, + "secretStoreRef": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "kind": { "type": "string" } + } + }, + "remoteRefs": { + "type": "object", + "properties": { + "app": { + "type": "object", + "properties": { + "password": { "type": "string" } + } + }, + "metrics": { + "type": "object", + "properties": { + "password": { "type": "string" } + } + } + } + } + } + } } } } diff --git a/charts/countly-mongodb/values.yaml b/charts/countly-mongodb/values.yaml index ed96741..0926c6b 100644 --- a/charts/countly-mongodb/values.yaml +++ b/charts/countly-mongodb/values.yaml @@ -106,3 +106,14 @@ networkPolicy: secrets: keep: true + mode: values # values | 
existingSecret | externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + app: + password: "" + metrics: + password: "" diff --git a/charts/countly/templates/external-secret-clickhouse.yaml b/charts/countly/templates/external-secret-clickhouse.yaml index 9b57d3e..47936b8 100644 --- a/charts/countly/templates/external-secret-clickhouse.yaml +++ b/charts/countly/templates/external-secret-clickhouse.yaml @@ -1,6 +1,6 @@ {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} {{- if not .Values.secrets.clickhouse.existingSecret }} -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: {{ include "countly.fullname" . }}-clickhouse diff --git a/charts/countly/templates/external-secret-common.yaml b/charts/countly/templates/external-secret-common.yaml index 3ebab1c..ee2f3ca 100644 --- a/charts/countly/templates/external-secret-common.yaml +++ b/charts/countly/templates/external-secret-common.yaml @@ -1,6 +1,6 @@ {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} {{- if not .Values.secrets.common.existingSecret }} -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: {{ include "countly.fullname" . }}-common diff --git a/charts/countly/templates/external-secret-kafka.yaml b/charts/countly/templates/external-secret-kafka.yaml index dc4e145..6398cbd 100644 --- a/charts/countly/templates/external-secret-kafka.yaml +++ b/charts/countly/templates/external-secret-kafka.yaml @@ -1,6 +1,6 @@ {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} {{- if not .Values.secrets.kafka.existingSecret }} -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: {{ include "countly.fullname" . 
}}-kafka diff --git a/charts/countly/templates/external-secret-mongodb.yaml b/charts/countly/templates/external-secret-mongodb.yaml index 5f0603c..5011b22 100644 --- a/charts/countly/templates/external-secret-mongodb.yaml +++ b/charts/countly/templates/external-secret-mongodb.yaml @@ -1,6 +1,6 @@ {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} {{- if not .Values.secrets.mongodb.existingSecret }} -apiVersion: external-secrets.io/v1beta1 +apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: name: {{ include "countly.fullname" . }}-mongodb diff --git a/environments/reference/README.md b/environments/reference/README.md index a2a0335..cceff05 100644 --- a/environments/reference/README.md +++ b/environments/reference/README.md @@ -20,10 +20,10 @@ This directory is a complete starting point for a new Countly deployment. - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` 3. Fill in required secrets in the chart-specific files: - - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `clickhouse.yaml` → `auth.defaultUserPassword.password` - - `kafka.yaml` → `kafkaConnect.clickhouse.password` + - `secrets-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `secrets-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `secrets-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `secrets-kafka.yaml` → `kafkaConnect.clickhouse.password` - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` Or use `secrets.example.yaml` as a complete reference. @@ -48,7 +48,7 @@ See `secrets.example.yaml` for a complete list of all required secrets. 
For production, choose one of: - **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) - **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `secrets-*.yaml` files - **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) For private registries such as GAR, also create namespaced image pull secrets. diff --git a/environments/reference/external-secrets.example.yaml b/environments/reference/external-secrets.example.yaml index dfd38d4..df42f94 100644 --- a/environments/reference/external-secrets.example.yaml +++ b/environments/reference/external-secrets.example.yaml @@ -2,30 +2,69 @@ # External Secrets Operator (ESO) Configuration Example # ============================================================================= # When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in environments//countly.yaml: +# in the chart-specific secrets files under environments//: # -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: my-secret-store -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "countly/encryption-reports-key" -# webSessionSecret: "countly/web-session-secret" -# passwordSecret: "countly/password-secret" -# clickhouse: -# url: "countly/clickhouse-url" -# username: "countly/clickhouse-username" -# password: "countly/clickhouse-password" -# database: "countly/clickhouse-database" -# kafka: -# brokers: "countly/kafka-brokers" -# securityProtocol: "countly/kafka-security-protocol" -# mongodb: -# connectionString: "countly/mongodb-connection-string" +# environments//secrets-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# 
secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "customers/acme/countly/encryption-reports-key" +# webSessionSecret: "customers/acme/countly/web-session-secret" +# passwordSecret: "customers/acme/countly/password-secret" +# clickhouse: +# url: "customers/acme/countly/clickhouse-url" +# username: "customers/acme/countly/clickhouse-username" +# password: "customers/acme/countly/clickhouse-password" +# database: "customers/acme/countly/clickhouse-database" +# kafka: +# brokers: "customers/acme/countly/kafka-brokers" +# securityProtocol: "customers/acme/countly/kafka-security-protocol" +# mongodb: +# connectionString: "customers/acme/countly/mongodb-connection-string" +# +# environments//secrets-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "customers/acme/clickhouse/default-user-password" +# +# environments//secrets-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# username: "customers/acme/kafka-connect/clickhouse-username" +# password: "customers/acme/kafka-connect/clickhouse-password" +# +# environments//secrets-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# app: +# password: "customers/acme/mongodb/app-password" +# metrics: +# password: "customers/acme/mongodb/metrics-password" # # For GAR image pulls, configure this in environments//global.yaml: # diff --git a/environments/reference/secrets-clickhouse.yaml b/environments/reference/secrets-clickhouse.yaml index 17c22f1..e008a75 100644 --- a/environments/reference/secrets-clickhouse.yaml +++ 
b/environments/reference/secrets-clickhouse.yaml @@ -1,4 +1,18 @@ # ClickHouse secrets — FILL IN before first deploy +secrets: + mode: values + auth: defaultUserPassword: password: "" # REQUIRED: must match secrets-countly.yaml secrets.clickhouse.password + +# For Secret Manager / External Secrets instead of direct values: +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "customers/acme/clickhouse/default-user-password" diff --git a/environments/reference/secrets-countly.yaml b/environments/reference/secrets-countly.yaml index 1d60be2..025a845 100644 --- a/environments/reference/secrets-countly.yaml +++ b/environments/reference/secrets-countly.yaml @@ -14,3 +14,27 @@ secrets: securityProtocol: "PLAINTEXT" mongodb: password: "" # REQUIRED: must match secrets-mongodb.yaml + +# For Secret Manager / External Secrets instead of direct values: +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "customers/acme/countly/encryption-reports-key" +# webSessionSecret: "customers/acme/countly/web-session-secret" +# passwordSecret: "customers/acme/countly/password-secret" +# clickhouse: +# url: "customers/acme/countly/clickhouse-url" +# username: "customers/acme/countly/clickhouse-username" +# password: "customers/acme/countly/clickhouse-password" +# database: "customers/acme/countly/clickhouse-database" +# kafka: +# brokers: "customers/acme/countly/kafka-brokers" +# securityProtocol: "customers/acme/countly/kafka-security-protocol" +# mongodb: +# connectionString: "customers/acme/countly/mongodb-connection-string" diff --git a/environments/reference/secrets-kafka.yaml b/environments/reference/secrets-kafka.yaml index 9b587b7..bd5fdae 100644 --- a/environments/reference/secrets-kafka.yaml +++ 
b/environments/reference/secrets-kafka.yaml @@ -1,4 +1,20 @@ # Kafka secrets — FILL IN before first deploy +secrets: + mode: values + kafkaConnect: clickhouse: password: "" # REQUIRED: must match ClickHouse default user password + +# For Secret Manager / External Secrets instead of direct values: +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# username: "customers/acme/kafka-connect/clickhouse-username" +# password: "customers/acme/kafka-connect/clickhouse-password" diff --git a/environments/reference/secrets-mongodb.yaml b/environments/reference/secrets-mongodb.yaml index ae59611..26e4a70 100644 --- a/environments/reference/secrets-mongodb.yaml +++ b/environments/reference/secrets-mongodb.yaml @@ -1,7 +1,24 @@ # MongoDB secrets — FILL IN before first deploy +secrets: + mode: values + users: app: password: "" # REQUIRED: must match secrets-countly.yaml secrets.mongodb.password metrics: enabled: true password: "" # REQUIRED: metrics exporter password + +# For Secret Manager / External Secrets instead of direct values: +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# app: +# password: "customers/acme/mongodb/app-password" +# metrics: +# password: "customers/acme/mongodb/metrics-password" diff --git a/environments/reference/secrets.example.yaml b/environments/reference/secrets.example.yaml index 282eb0d..1b1bbe5 100644 --- a/environments/reference/secrets.example.yaml +++ b/environments/reference/secrets.example.yaml @@ -15,6 +15,7 @@ # --- countly chart (environments//secrets-countly.yaml) --- secrets: + mode: values common: encryptionReportsKey: "CHANGEME-min-8-chars" webSessionSecret: "CHANGEME-min-8-chars" @@ -25,6 +26,8 @@ secrets: password: "CHANGEME-match-mongodb-chart" # --- countly-mongodb chart 
(environments//secrets-mongodb.yaml) --- +secrets: + mode: values users: app: password: "CHANGEME-match-secrets.mongodb.password" @@ -32,11 +35,25 @@ users: password: "CHANGEME-metrics-exporter" # --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +secrets: + mode: values auth: defaultUserPassword: password: "CHANGEME-match-secrets.clickhouse.password" # --- countly-kafka chart (environments//secrets-kafka.yaml) --- +secrets: + mode: values kafkaConnect: clickhouse: password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore From 917c471dd0d7b51e27f1df207b515d576eef8655 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 00:33:27 +0530 Subject: [PATCH 39/79] Add optional MongoDB admin user support --- .../templates/mongodbcommunity.yaml | 11 +++++ .../templates/secret-passwords.yaml | 48 +++++++++++++++++++ charts/countly-mongodb/values.schema.json | 18 +++++++ charts/countly-mongodb/values.yaml | 17 +++++++ .../reference/external-secrets.example.yaml | 2 + environments/reference/secrets-mongodb.yaml | 5 ++ environments/reference/secrets.example.yaml | 3 ++ 7 files changed, 104 insertions(+) diff --git a/charts/countly-mongodb/templates/mongodbcommunity.yaml b/charts/countly-mongodb/templates/mongodbcommunity.yaml index e3ba8d5..242aaab 100644 --- a/charts/countly-mongodb/templates/mongodbcommunity.yaml +++ b/charts/countly-mongodb/templates/mongodbcommunity.yaml @@ -19,6 +19,17 @@ spec: enabled: true {{- end }} users: + {{- if .Values.users.admin.enabled }} + - name: {{ .Values.users.admin.name }} + db: {{ .Values.users.admin.database }} + passwordSecretRef: + name: {{ .Values.users.admin.passwordSecretName }} + key: {{ .Values.users.admin.passwordSecretKey }} + roles: + {{- toYaml .Values.users.admin.roles | nindent 8 }} + 
scramCredentialsSecretName: {{ .Values.users.admin.name }}-scram + connectionStringSecretName: {{ include "countly-mongodb.fullname" . }}-{{ .Values.users.admin.name }}-mongodb-conn + {{- end }} - name: {{ .Values.users.app.name }} db: {{ .Values.users.app.database }} passwordSecretRef: diff --git a/charts/countly-mongodb/templates/secret-passwords.yaml b/charts/countly-mongodb/templates/secret-passwords.yaml index 368692b..06cfc2b 100644 --- a/charts/countly-mongodb/templates/secret-passwords.yaml +++ b/charts/countly-mongodb/templates/secret-passwords.yaml @@ -1,4 +1,29 @@ {{- if eq (.Values.secrets.mode | default "values") "values" }} +{{- if .Values.users.admin.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.users.admin.passwordSecretName }} + labels: + {{- include "countly-mongodb.labels" . | nindent 4 }} + annotations: + {{- if .Values.secrets.keep }} + helm.sh/resource-policy: keep + {{- end }} + {{- include "countly-mongodb.syncWave" (dict "wave" "0" "root" .) | nindent 4 }} +type: Opaque +data: + {{ .Values.users.admin.passwordSecretKey }}: |- + {{- $existing := lookup "v1" "Secret" .Release.Namespace .Values.users.admin.passwordSecretName -}} + {{- if and $existing (not .Values.users.admin.password) }} + {{ index $existing.data .Values.users.admin.passwordSecretKey }} + {{- else if .Values.users.admin.password }} + {{ .Values.users.admin.password | b64enc }} + {{- else }} + {{- fail "MongoDB admin user password is required on first install when users.admin.enabled=true. Set users.admin.password." }} + {{- end }} +--- +{{- end }} apiVersion: v1 kind: Secret metadata: @@ -48,6 +73,29 @@ data: {{- end }} {{- end }} {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} +{{- if .Values.users.admin.enabled }} +--- +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: {{ .Values.users.admin.passwordSecretName }} + labels: + {{- include "countly-mongodb.labels" . 
| nindent 4 }} + annotations: + {{- include "countly-mongodb.syncWave" (dict "wave" "0" "root" .) | nindent 4 }} +spec: + refreshInterval: {{ .Values.secrets.externalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "secrets.externalSecret.secretStoreRef.name is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.secretStoreRef.name }} + kind: {{ .Values.secrets.externalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ .Values.users.admin.passwordSecretName }} + creationPolicy: Owner + data: + - secretKey: {{ .Values.users.admin.passwordSecretKey }} + remoteRef: + key: {{ required "secrets.externalSecret.remoteRefs.admin.password is required when users.admin.enabled=true and secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.admin.password }} +{{- end }} --- apiVersion: external-secrets.io/v1 kind: ExternalSecret diff --git a/charts/countly-mongodb/values.schema.json b/charts/countly-mongodb/values.schema.json index 420fa71..d3c27e7 100644 --- a/charts/countly-mongodb/values.schema.json +++ b/charts/countly-mongodb/values.schema.json @@ -31,6 +31,18 @@ "users": { "type": "object", "properties": { + "admin": { + "type": "object", + "properties": { + "enabled": { "type": "boolean" }, + "name": { "type": "string" }, + "database": { "type": "string" }, + "roles": { "type": "array" }, + "passwordSecretName": { "type": "string" }, + "passwordSecretKey": { "type": "string" }, + "password": { "type": "string" } + } + }, "app": { "type": "object", "required": ["name", "database", "passwordSecretName", "passwordSecretKey"], @@ -94,6 +106,12 @@ "password": { "type": "string" } } }, + "admin": { + "type": "object", + "properties": { + "password": { "type": "string" } + } + }, "metrics": { "type": "object", "properties": { diff --git a/charts/countly-mongodb/values.yaml b/charts/countly-mongodb/values.yaml index 0926c6b..430fec8 100644 --- a/charts/countly-mongodb/values.yaml 
+++ b/charts/countly-mongodb/values.yaml @@ -44,6 +44,21 @@ mongodb: enabled: false users: + admin: + enabled: false + name: admin + database: admin + roles: + - name: root + db: admin + - name: userAdminAnyDatabase + db: admin + - name: dbAdminAnyDatabase + db: admin + passwordSecretName: admin-user-password + passwordSecretKey: password + password: "" + app: name: app database: admin @@ -113,6 +128,8 @@ secrets: name: "" kind: ClusterSecretStore remoteRefs: + admin: + password: "" app: password: "" metrics: diff --git a/environments/reference/external-secrets.example.yaml b/environments/reference/external-secrets.example.yaml index df42f94..54776b4 100644 --- a/environments/reference/external-secrets.example.yaml +++ b/environments/reference/external-secrets.example.yaml @@ -61,6 +61,8 @@ # name: gcp-secrets # kind: ClusterSecretStore # remoteRefs: +# admin: +# password: "customers/acme/mongodb/admin-password" # app: # password: "customers/acme/mongodb/app-password" # metrics: diff --git a/environments/reference/secrets-mongodb.yaml b/environments/reference/secrets-mongodb.yaml index 26e4a70..5b76ba9 100644 --- a/environments/reference/secrets-mongodb.yaml +++ b/environments/reference/secrets-mongodb.yaml @@ -3,6 +3,9 @@ secrets: mode: values users: + admin: + enabled: false + password: "" # OPTIONAL: super admin/root-style user when enabled app: password: "" # REQUIRED: must match secrets-countly.yaml secrets.mongodb.password metrics: @@ -18,6 +21,8 @@ users: # name: gcp-secrets # kind: ClusterSecretStore # remoteRefs: +# admin: +# password: "customers/acme/mongodb/admin-password" # app: # password: "customers/acme/mongodb/app-password" # metrics: diff --git a/environments/reference/secrets.example.yaml b/environments/reference/secrets.example.yaml index 1b1bbe5..a18a98c 100644 --- a/environments/reference/secrets.example.yaml +++ b/environments/reference/secrets.example.yaml @@ -29,6 +29,9 @@ secrets: secrets: mode: values users: + admin: + enabled: false + 
password: "CHANGEME-super-admin" app: password: "CHANGEME-match-secrets.mongodb.password" metrics: From 7ab6b694afe6e4797c3e043b08eb7775833f35ca Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 00:35:06 +0530 Subject: [PATCH 40/79] Enable MongoDB admin user by default --- charts/countly-mongodb/values.yaml | 2 +- environments/reference/secrets-mongodb.yaml | 4 ++-- environments/reference/secrets.example.yaml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/charts/countly-mongodb/values.yaml b/charts/countly-mongodb/values.yaml index 430fec8..a68b859 100644 --- a/charts/countly-mongodb/values.yaml +++ b/charts/countly-mongodb/values.yaml @@ -45,7 +45,7 @@ mongodb: users: admin: - enabled: false + enabled: true name: admin database: admin roles: diff --git a/environments/reference/secrets-mongodb.yaml b/environments/reference/secrets-mongodb.yaml index 5b76ba9..a33e7cc 100644 --- a/environments/reference/secrets-mongodb.yaml +++ b/environments/reference/secrets-mongodb.yaml @@ -4,8 +4,8 @@ secrets: users: admin: - enabled: false - password: "" # OPTIONAL: super admin/root-style user when enabled + enabled: true + password: "" # REQUIRED: MongoDB super admin/root-style user app: password: "" # REQUIRED: must match secrets-countly.yaml secrets.mongodb.password metrics: diff --git a/environments/reference/secrets.example.yaml b/environments/reference/secrets.example.yaml index a18a98c..07af390 100644 --- a/environments/reference/secrets.example.yaml +++ b/environments/reference/secrets.example.yaml @@ -30,7 +30,7 @@ secrets: mode: values users: admin: - enabled: false + enabled: true password: "CHANGEME-super-admin" app: password: "CHANGEME-match-secrets.mongodb.password" From 97dbf70d83666b1b76a90c9c2e9dd06e07514f20 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 00:49:01 +0530 Subject: [PATCH 41/79] Use Secret Manager-backed app secrets for gcr-argo --- environments/gcr-argo/secrets-clickhouse.yaml | 14 +++++-- 
environments/gcr-argo/secrets-countly.yaml | 37 +++++++++++-------- environments/gcr-argo/secrets-kafka.yaml | 16 ++++++-- environments/gcr-argo/secrets-mongodb.yaml | 22 +++++++++-- 4 files changed, 62 insertions(+), 27 deletions(-) diff --git a/environments/gcr-argo/secrets-clickhouse.yaml b/environments/gcr-argo/secrets-clickhouse.yaml index 4bc6a1c..d67ff5d 100644 --- a/environments/gcr-argo/secrets-clickhouse.yaml +++ b/environments/gcr-argo/secrets-clickhouse.yaml @@ -1,4 +1,10 @@ -# ClickHouse secrets — FILL IN before first deploy -auth: - defaultUserPassword: - password: "GcrArgoClickhouse2026!" +# ClickHouse secrets from Google Secret Manager via External Secrets Operator +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: customers-gcr-argo-clickhouse-default-user-password diff --git a/environments/gcr-argo/secrets-countly.yaml b/environments/gcr-argo/secrets-countly.yaml index c80690a..f2308b2 100644 --- a/environments/gcr-argo/secrets-countly.yaml +++ b/environments/gcr-argo/secrets-countly.yaml @@ -1,16 +1,23 @@ -# Countly secrets — FILL IN before first deploy -# Passwords must match across charts (see secrets.example.yaml) +# Countly secrets from Google Secret Manager via External Secrets Operator secrets: - mode: values - common: - encryptionReportsKey: "gcr-argo-encryption-key-2026" - webSessionSecret: "gcr-argo-web-session-2026" - passwordSecret: "gcr-argo-password-secret-2026" - clickhouse: - username: "default" - password: "GcrArgoClickhouse2026!" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - mongodb: - password: "GcrArgoMongo2026!" 
+ mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: customers-gcr-argo-countly-encryption-reports-key + webSessionSecret: customers-gcr-argo-countly-web-session-secret + passwordSecret: customers-gcr-argo-countly-password-secret + clickhouse: + url: customers-gcr-argo-countly-clickhouse-url + username: customers-gcr-argo-countly-clickhouse-username + password: customers-gcr-argo-countly-clickhouse-password + database: customers-gcr-argo-countly-clickhouse-database + kafka: + brokers: customers-gcr-argo-countly-kafka-brokers + securityProtocol: customers-gcr-argo-countly-kafka-security-protocol + mongodb: + connectionString: customers-gcr-argo-countly-mongodb-connection-string diff --git a/environments/gcr-argo/secrets-kafka.yaml b/environments/gcr-argo/secrets-kafka.yaml index d84c02c..fecfced 100644 --- a/environments/gcr-argo/secrets-kafka.yaml +++ b/environments/gcr-argo/secrets-kafka.yaml @@ -1,4 +1,12 @@ -# Kafka secrets — FILL IN before first deploy -kafkaConnect: - clickhouse: - password: "GcrArgoClickhouse2026!" 
+# Kafka Connect secrets from Google Secret Manager via External Secrets Operator +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + username: customers-gcr-argo-kafka-connect-clickhouse-username + password: customers-gcr-argo-kafka-connect-clickhouse-password diff --git a/environments/gcr-argo/secrets-mongodb.yaml b/environments/gcr-argo/secrets-mongodb.yaml index 7e53925..f4958f6 100644 --- a/environments/gcr-argo/secrets-mongodb.yaml +++ b/environments/gcr-argo/secrets-mongodb.yaml @@ -1,7 +1,21 @@ -# MongoDB secrets — FILL IN before first deploy +# MongoDB secrets from Google Secret Manager via External Secrets Operator +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: customers-gcr-argo-mongodb-admin-password + app: + password: customers-gcr-argo-mongodb-app-password + metrics: + password: customers-gcr-argo-mongodb-metrics-password + users: - app: - password: "GcrArgoMongo2026!" + admin: + enabled: true metrics: enabled: true - password: "GcrArgoMongoMetrics2026!" 
From eadf9b600769c8e5997e487d95bde7b4111c924e Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 00:58:08 +0530 Subject: [PATCH 42/79] Limit gcr-argo app secrets to passwords only --- .../external-secret-clickhouse-connect.yaml | 13 ++++++++++ charts/countly/templates/_helpers.tpl | 24 +++++++++++++++++++ .../templates/external-secret-clickhouse.yaml | 23 ++++++++++++++++++ .../templates/external-secret-common.yaml | 18 ++++++++++++++ .../templates/external-secret-kafka.yaml | 23 ++++++++++++++++++ .../templates/external-secret-mongodb.yaml | 15 ++++++++++++ charts/countly/values.yaml | 1 + environments/gcr-argo/secrets-countly.yaml | 23 +++++++++--------- environments/gcr-argo/secrets-kafka.yaml | 2 +- 9 files changed, 129 insertions(+), 13 deletions(-) diff --git a/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml b/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml index f6e17a5..3414d88 100644 --- a/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml +++ b/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml @@ -15,11 +15,24 @@ spec: target: name: {{ .Values.kafkaConnect.clickhouse.secretName }} creationPolicy: Owner + template: + engineVersion: v2 + data: + {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} + username: {{ .Values.kafkaConnect.clickhouse.username | quote }} + {{- end }} + {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.password }} + password: {{ .Values.kafkaConnect.clickhouse.password | quote }} + {{- end }} data: + {{- if .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} - secretKey: username remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.username is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} + {{- end }} + {{- if .Values.secrets.externalSecret.remoteRefs.clickhouse.password }} - secretKey: password remoteRef: 
key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.password is required when secrets.mode=externalSecret" .Values.secrets.externalSecret.remoteRefs.clickhouse.password }} + {{- end }} {{- end }} diff --git a/charts/countly/templates/_helpers.tpl b/charts/countly/templates/_helpers.tpl index 341668a..bb4521e 100644 --- a/charts/countly/templates/_helpers.tpl +++ b/charts/countly/templates/_helpers.tpl @@ -142,6 +142,30 @@ mongodb://{{ $user }}:{{ $pass }}@{{ $host }}:{{ $port }}/{{ $db }}?replicaSet={ {{- end -}} {{- end -}} +{{/* +MongoDB connection string computation using an explicit password value. +Used by ExternalSecret templates where the password may come from the secret backend. +*/}} +{{- define "countly.mongodb.connectionStringWithPassword" -}} +{{- $root := .root -}} +{{- $pass := .password -}} +{{- $bs := ($root.Values.backingServices).mongodb | default dict -}} +{{- $connStr := $bs.connectionString -}} +{{- if $connStr -}} +{{- $connStr -}} +{{- else -}} +{{- if not $pass -}} +{{- fail "MongoDB password is required. Set backingServices.mongodb.password, secrets.mongodb.password, or secrets.externalSecret.remoteRefs.mongodb.password." -}} +{{- end -}} +{{- $host := $bs.host | default (printf "%s-mongodb-svc.%s.svc.cluster.local" $root.Release.Name ($root.Values.mongodbNamespace | default "mongodb")) -}} +{{- $port := $bs.port | default "27017" -}} +{{- $user := $bs.username | default "app" -}} +{{- $db := $bs.database | default "admin" -}} +{{- $rs := $bs.replicaSet | default (printf "%s-mongodb" $root.Release.Name) -}} +mongodb://{{ $user }}:{{ $pass }}@{{ $host }}:{{ $port }}/{{ $db }}?replicaSet={{ $rs }}&ssl=false +{{- end -}} +{{- end -}} + {{/* Kafka Connect API URL computation. Reads from backingServices.kafka.connectApiUrl; falls back to in-cluster DNS. 
diff --git a/charts/countly/templates/external-secret-clickhouse.yaml b/charts/countly/templates/external-secret-clickhouse.yaml index 47936b8..ea5fab3 100644 --- a/charts/countly/templates/external-secret-clickhouse.yaml +++ b/charts/countly/templates/external-secret-clickhouse.yaml @@ -18,18 +18,41 @@ spec: target: name: {{ include "countly.fullname" . }}-clickhouse creationPolicy: Owner + template: + engineVersion: v2 + data: + {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.url }} + COUNTLY_CONFIG__CLICKHOUSE_URL: {{ include "countly.clickhouse.url" . | quote }} + {{- end }} + {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} + COUNTLY_CONFIG__CLICKHOUSE_USERNAME: {{ (.Values.secrets.clickhouse.username | default ((.Values.backingServices).clickhouse).username | default "default") | quote }} + {{- end }} + {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.password }} + COUNTLY_CONFIG__CLICKHOUSE_PASSWORD: {{ (.Values.secrets.clickhouse.password | default ((.Values.backingServices).clickhouse).password) | quote }} + {{- end }} + {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.database }} + COUNTLY_CONFIG__CLICKHOUSE_DATABASE: {{ (.Values.secrets.clickhouse.database | default ((.Values.backingServices).clickhouse).database | default "countly_drill") | quote }} + {{- end }} data: + {{- if .Values.secrets.externalSecret.remoteRefs.clickhouse.url }} - secretKey: COUNTLY_CONFIG__CLICKHOUSE_URL remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.url is required" .Values.secrets.externalSecret.remoteRefs.clickhouse.url }} + {{- end }} + {{- if .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} - secretKey: COUNTLY_CONFIG__CLICKHOUSE_USERNAME remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.username is required" .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} + {{- end }} + {{- if 
.Values.secrets.externalSecret.remoteRefs.clickhouse.password }} - secretKey: COUNTLY_CONFIG__CLICKHOUSE_PASSWORD remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.password is required" .Values.secrets.externalSecret.remoteRefs.clickhouse.password }} + {{- end }} + {{- if .Values.secrets.externalSecret.remoteRefs.clickhouse.database }} - secretKey: COUNTLY_CONFIG__CLICKHOUSE_DATABASE remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.clickhouse.database is required" .Values.secrets.externalSecret.remoteRefs.clickhouse.database }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/countly/templates/external-secret-common.yaml b/charts/countly/templates/external-secret-common.yaml index ee2f3ca..fe11750 100644 --- a/charts/countly/templates/external-secret-common.yaml +++ b/charts/countly/templates/external-secret-common.yaml @@ -18,15 +18,33 @@ spec: target: name: {{ include "countly.fullname" . }}-common creationPolicy: Owner + template: + engineVersion: v2 + data: + {{- if not .Values.secrets.externalSecret.remoteRefs.common.encryptionReportsKey }} + COUNTLY_CONFIG__ENCRYPTION_REPORTS_KEY: {{ .Values.secrets.common.encryptionReportsKey | quote }} + {{- end }} + {{- if not .Values.secrets.externalSecret.remoteRefs.common.webSessionSecret }} + COUNTLY_CONFIG__WEB_SESSION_SECRET: {{ .Values.secrets.common.webSessionSecret | quote }} + {{- end }} + {{- if not .Values.secrets.externalSecret.remoteRefs.common.passwordSecret }} + COUNTLY_CONFIG__PASSWORDSECRET: {{ .Values.secrets.common.passwordSecret | quote }} + {{- end }} data: + {{- if .Values.secrets.externalSecret.remoteRefs.common.encryptionReportsKey }} - secretKey: COUNTLY_CONFIG__ENCRYPTION_REPORTS_KEY remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.common.encryptionReportsKey is required" .Values.secrets.externalSecret.remoteRefs.common.encryptionReportsKey }} + {{- end }} + {{- if .Values.secrets.externalSecret.remoteRefs.common.webSessionSecret }} - 
secretKey: COUNTLY_CONFIG__WEB_SESSION_SECRET remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.common.webSessionSecret is required" .Values.secrets.externalSecret.remoteRefs.common.webSessionSecret }} + {{- end }} + {{- if .Values.secrets.externalSecret.remoteRefs.common.passwordSecret }} - secretKey: COUNTLY_CONFIG__PASSWORDSECRET remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.common.passwordSecret is required" .Values.secrets.externalSecret.remoteRefs.common.passwordSecret }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/countly/templates/external-secret-kafka.yaml b/charts/countly/templates/external-secret-kafka.yaml index 6398cbd..4648368 100644 --- a/charts/countly/templates/external-secret-kafka.yaml +++ b/charts/countly/templates/external-secret-kafka.yaml @@ -18,13 +18,36 @@ spec: target: name: {{ include "countly.fullname" . }}-kafka creationPolicy: Owner + template: + engineVersion: v2 + data: + {{- if not .Values.secrets.externalSecret.remoteRefs.kafka.brokers }} + COUNTLY_CONFIG__KAFKA_RDKAFKA_BROKERS: {{ include "countly.kafka.brokers" . 
| quote }} + {{- end }} + {{- if not .Values.secrets.externalSecret.remoteRefs.kafka.securityProtocol }} + COUNTLY_CONFIG__KAFKA_RDKAFKA_SECURITYPROTOCOL: {{ (.Values.secrets.kafka.securityProtocol | default ((.Values.backingServices).kafka).securityProtocol | default "PLAINTEXT") | quote }} + {{- end }} + {{- $saslMechanism := .Values.secrets.kafka.saslMechanism | default ((.Values.backingServices).kafka).saslMechanism }} + {{- if and $saslMechanism (not .Values.secrets.externalSecret.remoteRefs.kafka.saslMechanism) }} + COUNTLY_CONFIG__KAFKA_RDKAFKA_SASLMECHANISM: {{ $saslMechanism | quote }} + {{- end }} + {{- if and $saslMechanism (not .Values.secrets.externalSecret.remoteRefs.kafka.saslUsername) }} + COUNTLY_CONFIG__KAFKA_RDKAFKA_SASLUSERNAME: {{ (.Values.secrets.kafka.saslUsername | default ((.Values.backingServices).kafka).saslUsername) | quote }} + {{- end }} + {{- if and $saslMechanism (not .Values.secrets.externalSecret.remoteRefs.kafka.saslPassword) }} + COUNTLY_CONFIG__KAFKA_RDKAFKA_SASLPASSWORD: {{ (.Values.secrets.kafka.saslPassword | default ((.Values.backingServices).kafka).saslPassword) | quote }} + {{- end }} data: + {{- if .Values.secrets.externalSecret.remoteRefs.kafka.brokers }} - secretKey: COUNTLY_CONFIG__KAFKA_RDKAFKA_BROKERS remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.kafka.brokers is required" .Values.secrets.externalSecret.remoteRefs.kafka.brokers }} + {{- end }} + {{- if .Values.secrets.externalSecret.remoteRefs.kafka.securityProtocol }} - secretKey: COUNTLY_CONFIG__KAFKA_RDKAFKA_SECURITYPROTOCOL remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.kafka.securityProtocol is required" .Values.secrets.externalSecret.remoteRefs.kafka.securityProtocol }} + {{- end }} {{- if .Values.secrets.externalSecret.remoteRefs.kafka.saslMechanism }} - secretKey: COUNTLY_CONFIG__KAFKA_RDKAFKA_SASLMECHANISM remoteRef: diff --git a/charts/countly/templates/external-secret-mongodb.yaml 
b/charts/countly/templates/external-secret-mongodb.yaml index 5011b22..ddd1443 100644 --- a/charts/countly/templates/external-secret-mongodb.yaml +++ b/charts/countly/templates/external-secret-mongodb.yaml @@ -18,9 +18,24 @@ spec: target: name: {{ include "countly.fullname" . }}-mongodb creationPolicy: Owner + template: + engineVersion: v2 + data: + {{- if .Values.secrets.externalSecret.remoteRefs.mongodb.connectionString }} + {{- else if .Values.secrets.externalSecret.remoteRefs.mongodb.password }} + {{ .Values.secrets.mongodb.key | default "connectionString.standard" }}: {{ include "countly.mongodb.connectionStringWithPassword" (dict "root" . "password" "{{ .mongodbPassword }}") | quote }} + {{- else }} + {{ .Values.secrets.mongodb.key | default "connectionString.standard" }}: {{ include "countly.mongodb.connectionString" . | quote }} + {{- end }} data: + {{- if .Values.secrets.externalSecret.remoteRefs.mongodb.connectionString }} - secretKey: {{ .Values.secrets.mongodb.key | default "connectionString.standard" }} remoteRef: key: {{ required "secrets.externalSecret.remoteRefs.mongodb.connectionString is required" .Values.secrets.externalSecret.remoteRefs.mongodb.connectionString }} + {{- else if .Values.secrets.externalSecret.remoteRefs.mongodb.password }} + - secretKey: mongodbPassword + remoteRef: + key: {{ required "secrets.externalSecret.remoteRefs.mongodb.password is required" .Values.secrets.externalSecret.remoteRefs.mongodb.password }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/countly/values.yaml b/charts/countly/values.yaml index 203ccc1..cf1081d 100644 --- a/charts/countly/values.yaml +++ b/charts/countly/values.yaml @@ -458,6 +458,7 @@ secrets: saslPassword: "" mongodb: connectionString: "" + password: "" # --- Network Policy --- diff --git a/environments/gcr-argo/secrets-countly.yaml b/environments/gcr-argo/secrets-countly.yaml index f2308b2..31c1cb1 100644 --- a/environments/gcr-argo/secrets-countly.yaml +++ 
b/environments/gcr-argo/secrets-countly.yaml @@ -1,23 +1,22 @@ -# Countly secrets from Google Secret Manager via External Secrets Operator +# Countly secrets: static values in Git, passwords from Google Secret Manager via External Secrets Operator secrets: mode: externalSecret + common: + encryptionReportsKey: "gcr-argo-encryption-key-2026" + webSessionSecret: "gcr-argo-web-session-2026" + passwordSecret: "gcr-argo-password-secret-2026" + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" externalSecret: refreshInterval: "1h" secretStoreRef: name: gcp-secrets kind: ClusterSecretStore remoteRefs: - common: - encryptionReportsKey: customers-gcr-argo-countly-encryption-reports-key - webSessionSecret: customers-gcr-argo-countly-web-session-secret - passwordSecret: customers-gcr-argo-countly-password-secret clickhouse: - url: customers-gcr-argo-countly-clickhouse-url - username: customers-gcr-argo-countly-clickhouse-username password: customers-gcr-argo-countly-clickhouse-password - database: customers-gcr-argo-countly-clickhouse-database - kafka: - brokers: customers-gcr-argo-countly-kafka-brokers - securityProtocol: customers-gcr-argo-countly-kafka-security-protocol mongodb: - connectionString: customers-gcr-argo-countly-mongodb-connection-string + password: customers-gcr-argo-countly-mongodb-password diff --git a/environments/gcr-argo/secrets-kafka.yaml b/environments/gcr-argo/secrets-kafka.yaml index fecfced..acc0e8c 100644 --- a/environments/gcr-argo/secrets-kafka.yaml +++ b/environments/gcr-argo/secrets-kafka.yaml @@ -1,6 +1,7 @@ # Kafka Connect secrets from Google Secret Manager via External Secrets Operator secrets: mode: externalSecret + # keep non-sensitive static values here; fetch only passwords remotely externalSecret: refreshInterval: "1h" secretStoreRef: @@ -8,5 +9,4 @@ secrets: kind: ClusterSecretStore remoteRefs: clickhouse: - username: customers-gcr-argo-kafka-connect-clickhouse-username password: 
customers-gcr-argo-kafka-connect-clickhouse-password From a9c67db83934190bd53c54a1f90cedbad47f4493 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 01:42:14 +0530 Subject: [PATCH 43/79] Fallback countly common and kafka secrets to values --- charts/countly/templates/external-secret-common.yaml | 4 ++++ charts/countly/templates/external-secret-kafka.yaml | 4 ++++ charts/countly/templates/secret-common.yaml | 4 +++- charts/countly/templates/secret-kafka.yaml | 4 +++- 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/charts/countly/templates/external-secret-common.yaml b/charts/countly/templates/external-secret-common.yaml index fe11750..949c7bb 100644 --- a/charts/countly/templates/external-secret-common.yaml +++ b/charts/countly/templates/external-secret-common.yaml @@ -1,5 +1,8 @@ {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} {{- if not .Values.secrets.common.existingSecret }} +{{- $commonRemote := .Values.secrets.externalSecret.remoteRefs.common | default dict -}} +{{- $commonUsesExternal := or $commonRemote.encryptionReportsKey $commonRemote.webSessionSecret $commonRemote.passwordSecret -}} +{{- if $commonUsesExternal }} apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: @@ -48,3 +51,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/charts/countly/templates/external-secret-kafka.yaml b/charts/countly/templates/external-secret-kafka.yaml index 4648368..8c6f2ec 100644 --- a/charts/countly/templates/external-secret-kafka.yaml +++ b/charts/countly/templates/external-secret-kafka.yaml @@ -1,5 +1,8 @@ {{- if eq (.Values.secrets.mode | default "values") "externalSecret" }} {{- if not .Values.secrets.kafka.existingSecret }} +{{- $kafkaRemote := .Values.secrets.externalSecret.remoteRefs.kafka | default dict -}} +{{- $kafkaUsesExternal := or $kafkaRemote.brokers $kafkaRemote.securityProtocol $kafkaRemote.saslMechanism $kafkaRemote.saslUsername $kafkaRemote.saslPassword -}} +{{- if 
$kafkaUsesExternal }} apiVersion: external-secrets.io/v1 kind: ExternalSecret metadata: @@ -65,3 +68,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/charts/countly/templates/secret-common.yaml b/charts/countly/templates/secret-common.yaml index 2308a71..8d17ecb 100644 --- a/charts/countly/templates/secret-common.yaml +++ b/charts/countly/templates/secret-common.yaml @@ -1,4 +1,6 @@ -{{- if and (ne (.Values.secrets.mode | default "values") "externalSecret") (not .Values.secrets.common.existingSecret) }} +{{- $commonRemote := .Values.secrets.externalSecret.remoteRefs.common | default dict -}} +{{- $commonUsesExternal := or $commonRemote.encryptionReportsKey $commonRemote.webSessionSecret $commonRemote.passwordSecret -}} +{{- if and (or (ne (.Values.secrets.mode | default "values") "externalSecret") (not $commonUsesExternal)) (not .Values.secrets.common.existingSecret) }} apiVersion: v1 kind: Secret metadata: diff --git a/charts/countly/templates/secret-kafka.yaml b/charts/countly/templates/secret-kafka.yaml index 18ed231..f71473a 100644 --- a/charts/countly/templates/secret-kafka.yaml +++ b/charts/countly/templates/secret-kafka.yaml @@ -1,4 +1,6 @@ -{{- if and (ne (.Values.secrets.mode | default "values") "externalSecret") (not .Values.secrets.kafka.existingSecret) }} +{{- $kafkaRemote := .Values.secrets.externalSecret.remoteRefs.kafka | default dict -}} +{{- $kafkaUsesExternal := or $kafkaRemote.brokers $kafkaRemote.securityProtocol $kafkaRemote.saslMechanism $kafkaRemote.saslUsername $kafkaRemote.saslPassword -}} +{{- if and (or (ne (.Values.secrets.mode | default "values") "externalSecret") (not $kafkaUsesExternal)) (not .Values.secrets.kafka.existingSecret) }} apiVersion: v1 kind: Secret metadata: From b6efd9e5a77c9ef077914ebe028dfc6348626f35 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 01:58:30 +0530 Subject: [PATCH 44/79] Fetch countly common secrets from Secret Manager --- environments/gcr-argo/secrets-countly.yaml | 8 
++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/environments/gcr-argo/secrets-countly.yaml b/environments/gcr-argo/secrets-countly.yaml index 31c1cb1..6f7311e 100644 --- a/environments/gcr-argo/secrets-countly.yaml +++ b/environments/gcr-argo/secrets-countly.yaml @@ -1,10 +1,6 @@ # Countly secrets: static values in Git, passwords from Google Secret Manager via External Secrets Operator secrets: mode: externalSecret - common: - encryptionReportsKey: "gcr-argo-encryption-key-2026" - webSessionSecret: "gcr-argo-web-session-2026" - passwordSecret: "gcr-argo-password-secret-2026" clickhouse: username: "default" database: "countly_drill" @@ -16,6 +12,10 @@ secrets: name: gcp-secrets kind: ClusterSecretStore remoteRefs: + common: + encryptionReportsKey: customers-gcr-argo-countly-encryption-reports-key + webSessionSecret: customers-gcr-argo-countly-web-session-secret + passwordSecret: customers-gcr-argo-countly-password-secret clickhouse: password: customers-gcr-argo-countly-clickhouse-password mongodb: From 7d011d6ae67bd2d6c7d0e87aa523b0cdefa93654 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 02:10:16 +0530 Subject: [PATCH 45/79] Skip empty common secret template data --- charts/countly/templates/external-secret-common.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/charts/countly/templates/external-secret-common.yaml b/charts/countly/templates/external-secret-common.yaml index 949c7bb..38cb29a 100644 --- a/charts/countly/templates/external-secret-common.yaml +++ b/charts/countly/templates/external-secret-common.yaml @@ -21,6 +21,8 @@ spec: target: name: {{ include "countly.fullname" . 
}}-common creationPolicy: Owner + {{- $hasCommonTemplateData := not (and $commonRemote.encryptionReportsKey $commonRemote.webSessionSecret $commonRemote.passwordSecret) }} + {{- if $hasCommonTemplateData }} template: engineVersion: v2 data: @@ -33,6 +35,7 @@ spec: {{- if not .Values.secrets.externalSecret.remoteRefs.common.passwordSecret }} COUNTLY_CONFIG__PASSWORDSECRET: {{ .Values.secrets.common.passwordSecret | quote }} {{- end }} + {{- end }} data: {{- if .Values.secrets.externalSecret.remoteRefs.common.encryptionReportsKey }} - secretKey: COUNTLY_CONFIG__ENCRYPTION_REPORTS_KEY From c4e42078e37d228f23a97f0050c2d9d709c27b64 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 02:47:59 +0530 Subject: [PATCH 46/79] Standardize secret naming and document onboarding --- README.md | 13 + argocd/ONBOARDING.md | 741 ++++++++++++++++++ argocd/README.md | 22 + docs/SECRET-MANAGEMENT.md | 29 +- .../gcr-argo/external-secrets.example.yaml | 82 +- .../reference/external-secrets.example.yaml | 29 +- .../reference/secrets-clickhouse.yaml | 2 +- environments/reference/secrets-countly.yaml | 16 +- environments/reference/secrets-kafka.yaml | 3 +- environments/reference/secrets-mongodb.yaml | 6 +- scripts/new-argocd-customer.sh | 3 +- 11 files changed, 875 insertions(+), 71 deletions(-) create mode 100644 argocd/ONBOARDING.md diff --git a/README.md b/README.md index cc60422..6971511 100644 --- a/README.md +++ b/README.md @@ -180,6 +180,19 @@ For GitOps-managed pull secrets, start from [environments/reference/image-pull-s For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly and Kafka Connect each create their own namespaced `dockerconfigjson` pull secret. 
Application secrets can use the same pattern in `secrets-countly.yaml`, `secrets-kafka.yaml`, `secrets-clickhouse.yaml`, and `secrets-mongodb.yaml` by switching `secrets.mode` to `externalSecret` and filling `secrets.externalSecret.remoteRefs`. +Recommended Secret Manager naming convention: +- `<customer>-gar-dockerconfig` +- `<customer>-countly-encryption-reports-key` +- `<customer>-countly-web-session-secret` +- `<customer>-countly-password-secret` +- `<customer>-countly-clickhouse-password` +- `<customer>-countly-mongodb-password` +- `<customer>-kafka-connect-clickhouse-password` +- `<customer>-clickhouse-default-user-password` +- `<customer>-mongodb-admin-password` +- `<customer>-mongodb-app-password` +- `<customer>-mongodb-metrics-password` + ### GitOps Customer Onboarding For Argo CD managed deployments, scaffold a new customer/cluster with: diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md new file mode 100644 index 0000000..4cd3d07 --- /dev/null +++ b/argocd/ONBOARDING.md @@ -0,0 +1,741 @@ +# Customer Onboarding Guide + +This guide is written as a slow, step-by-step walkthrough for adding one new customer cluster. 
+ +Use this when you want to: +- create a new customer deployment +- choose between direct secrets and Secret Manager +- connect Argo CD to the new cluster +- troubleshoot the common issues we already hit once + +## What You Are Building + +For each customer, you will end up with: +- one Argo CD customer metadata file +- one environment folder with customer overrides +- one target Kubernetes cluster +- one set of Argo CD applications created automatically by `countly-bootstrap` +- one secret strategy: + - direct values in Git, or + - Google Secret Manager + External Secrets Operator + +### Visual Map + +```mermaid +flowchart TD + meta["argocd/customers/.yaml"] + env["environments//"] + bootstrap["Argo CD app: countly-bootstrap"] + appsets["ApplicationSets"] + cluster["Customer cluster"] + + meta --> bootstrap + env --> bootstrap + bootstrap --> appsets + + appsets --> eso["-external-secrets"] + appsets --> store["-cluster-secret-store"] + appsets --> mongo["-mongodb"] + appsets --> ch["-clickhouse"] + appsets --> kafka["-kafka"] + appsets --> countly["-countly"] + appsets --> obs["-observability (optional)"] + appsets --> mig["-migration (optional)"] + + eso --> cluster + store --> cluster + mongo --> cluster + ch --> cluster + kafka --> cluster + countly --> cluster + obs --> cluster + mig --> cluster +``` + +## The Secret Naming Rule + +Use this naming convention everywhere: + +```text +-- +``` + +Examples for customer `northstar`: + +```text +northstar-gar-dockerconfig +northstar-countly-encryption-reports-key +northstar-countly-web-session-secret +northstar-countly-password-secret +northstar-countly-clickhouse-password +northstar-countly-mongodb-password +northstar-kafka-connect-clickhouse-password +northstar-clickhouse-default-user-password +northstar-mongodb-admin-password +northstar-mongodb-app-password +northstar-mongodb-metrics-password +``` + +Keep the `` slug exactly the same in: +- `argocd/customers/.yaml` +- `environments//` +- Secret Manager secret 
names + +## Before You Start + +Make sure these are already true: + +1. You can access the repo. +2. You can access the Argo CD instance. +3. The target cluster exists. +4. The target cluster is registered in Argo CD. +5. DNS is ready for the customer hostname. + +Useful checks: + +```bash +argocd app list +argocd cluster list +kubectl config current-context +``` + +## Step 1: Create The Customer Scaffold + +Run: + +```bash +./scripts/new-argocd-customer.sh +``` + +Example: + +```bash +./scripts/new-argocd-customer.sh northstar https://1.2.3.4 analytics.northstar.example.com +``` + +This creates: +- `argocd/customers/northstar.yaml` +- `environments/northstar/` + +## How To Read Argo CD For One Customer + +Yes, in the current setup all customer apps appear in the same Argo CD dashboard view. +That is normal. + +The important trick is: every generated app starts with the customer slug. + +For customer `northstar`, you should expect app names like: + +```text +northstar-cluster-secret-store +northstar-external-secrets +northstar-mongodb +northstar-clickhouse +northstar-kafka +northstar-countly +northstar-observability +northstar-migration +``` + +So when the UI feels crowded, think: +- first filter by the customer slug +- then read the apps from platform first, then data stores, then Countly + +### Fast CLI Filters + +List only one customer's apps: + +```bash +kubectl get applications -n argocd | grep '^northstar-' +``` + +Get only one app: + +```bash +argocd app get northstar-countly +argocd app get northstar-kafka +``` + +Refresh one app: + +```bash +argocd app get northstar-countly --hard-refresh +``` + +Sync one app: + +```bash +argocd app sync northstar-countly +``` + +Terminate a stuck sync: + +```bash +argocd app terminate-op northstar-countly +``` + +### Best Order To Read A Broken Customer + +When one customer is failing, read the apps in this order: + +1. `northstar-cluster-secret-store` +2. `northstar-external-secrets` +3. `northstar-mongodb` +4. 
`northstar-clickhouse` +5. `northstar-kafka` +6. `northstar-countly` + +Why this order: +- if Secret Manager auth is broken, app secrets fail later +- if MongoDB or ClickHouse is broken, Countly can still show as unhealthy later +- if Kafka is broken, Countly ingestion can fail later + +### Healthy First-Rollout Shape + +This is the rough order you want to see in Argo CD: + +```mermaid +flowchart LR + A["-cluster-secret-store"] --> B["-external-secrets"] + B --> C["-mongodb"] + B --> D["-clickhouse"] + D --> E["-kafka"] + C --> F["-countly"] + D --> F + E --> F +``` + +If `countly` is unhealthy, do not start there immediately. +Walk backward to Kafka, ClickHouse, MongoDB, and secret-store health first. + +## Step 2: Fill In Customer Metadata + +Open: + +- `argocd/customers/.yaml` + +Set these carefully: +- `server` +- `gcpServiceAccountEmail` +- `secretManagerProjectID` +- `clusterProjectID` +- `clusterName` +- `clusterLocation` +- `hostname` +- `sizing` +- `security` +- `tls` +- `observability` +- `kafkaConnect` +- `migration` + +Example: + +```yaml +customer: northstar +environment: northstar +project: customer-platform +server: https://1.2.3.4 +gcpServiceAccountEmail: northstar-eso@example-secrets-project.iam.gserviceaccount.com +secretManagerProjectID: example-secrets-project +clusterProjectID: example-gke-project +clusterName: northstar-prod +clusterLocation: us-central1-a +hostname: analytics.northstar.example.com +sizing: tier1 +security: hardened +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled +``` + +## Step 3: Choose Your Secret Mode + +You have two valid ways to run a customer. 
+ +### Option A: Direct Values + +Use this when: +- you are testing quickly +- you are on an internal sandbox +- you are not ready to set up Secret Manager yet + +What to do: +- keep `secrets.mode: values` +- fill the passwords directly in: + - `environments//secrets-countly.yaml` + - `environments//secrets-kafka.yaml` + - `environments//secrets-clickhouse.yaml` + - `environments//secrets-mongodb.yaml` + +### Option B: Secret Manager + +Use this when: +- you do not want app passwords in Git +- you want customer isolation +- you want the production path + +What to do: +- set `secrets.mode: externalSecret` in the files that should read from GSM +- create the matching secrets in Google Secret Manager +- let External Secrets Operator create the Kubernetes `Secret`s for you + +## Step 4: If Using GAR, Decide Image Pull Mode + +There are two image pull patterns: + +### Direct / Public Pulls + +Use: + +```yaml +global: + imageSource: + mode: direct +``` + +### GAR + Secret Manager Pull Secret + +Use: + +```yaml +global: + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: us-docker.pkg.dev// + imagePullSecrets: + - name: countly-registry + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRef: + key: -gar-dockerconfig +``` + +## Step 5: If Using Secret Manager, Prepare The Cluster + +This is the production path. + +### 5.1 Install ESO Through Argo + +The repo already has the Argo pieces for: +- External Secrets Operator +- per-customer `ClusterSecretStore` + +After customer metadata is committed, `countly-bootstrap` will create them. 
+ +### 5.2 Enable Workload Identity On The Cluster + +Check: + +```bash +gcloud container clusters describe \ + --zone \ + --project \ + --format="value(workloadIdentityConfig.workloadPool)" +``` + +Expected: + +```text +.svc.id.goog +``` + +Then check node pool metadata mode: + +```bash +gcloud container node-pools describe \ + --cluster \ + --zone \ + --project \ + --format="value(config.workloadMetadataConfig.mode)" +``` + +Expected: + +```text +GKE_METADATA +``` + +If these are wrong, External Secrets will fail. + +### 5.3 Bind The Kubernetes Service Account To The GCP Service Account + +The External Secrets service account in namespace `external-secrets` must be annotated with: + +```yaml +iam.gke.io/gcp-service-account: +``` + +The GCP service account also needs: +- `roles/iam.workloadIdentityUser` +- access to the Secret Manager secrets you want to read + +### 5.4 Verify The ClusterSecretStore + +Check: + +```bash +kubectl get clustersecretstores.external-secrets.io +kubectl describe clustersecretstores.external-secrets.io gcp-secrets +``` + +Healthy looks like: +- `STATUS=Valid` +- `READY=True` + +If you see `InvalidProviderConfig`, first check Workload Identity. 
+ +## Step 6: Create Secrets In Google Secret Manager + +Use names like: + +```text +northstar-countly-encryption-reports-key +northstar-countly-web-session-secret +northstar-countly-password-secret +northstar-countly-clickhouse-password +northstar-countly-mongodb-password +northstar-kafka-connect-clickhouse-password +northstar-clickhouse-default-user-password +northstar-mongodb-admin-password +northstar-mongodb-app-password +northstar-mongodb-metrics-password +northstar-gar-dockerconfig +``` + +If your org policy blocks global replication, create secrets with user-managed regional replication: + +```bash +gcloud secrets create northstar-countly-clickhouse-password \ + --replication-policy=user-managed \ + --locations=europe-west1 +printf '%s' 'StrongPasswordHere' | gcloud secrets versions add northstar-countly-clickhouse-password --data-file=- +``` + +Create one secret at a time if you are debugging. It is easier to spot mistakes. + +## Step 7: Map The Secrets In Environment Files + +### Countly + +File: +- `environments/reference/secrets-countly.yaml` + +Secret Manager mode example: + +```yaml +secrets: + mode: externalSecret + externalSecret: + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: northstar-countly-encryption-reports-key + webSessionSecret: northstar-countly-web-session-secret + passwordSecret: northstar-countly-password-secret + clickhouse: + password: northstar-countly-clickhouse-password + mongodb: + password: northstar-countly-mongodb-password + clickhouse: + username: default + database: countly_drill + kafka: + securityProtocol: PLAINTEXT +``` + +### Kafka + +File: +- `environments/reference/secrets-kafka.yaml` + +```yaml +secrets: + mode: externalSecret + externalSecret: + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: northstar-kafka-connect-clickhouse-password +``` + +### ClickHouse + +File: +- 
`environments/reference/secrets-clickhouse.yaml` + +```yaml +secrets: + mode: externalSecret + externalSecret: + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: northstar-clickhouse-default-user-password +``` + +### MongoDB + +File: +- `environments/reference/secrets-mongodb.yaml` + +```yaml +secrets: + mode: externalSecret + externalSecret: + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: northstar-mongodb-admin-password + app: + password: northstar-mongodb-app-password + metrics: + password: northstar-mongodb-metrics-password +``` + +## Step 8: Commit And Sync + +Commit: + +```bash +git add argocd/customers/.yaml environments/ +git commit -m "Add customer" +git push origin +``` + +Sync bootstrap: + +```bash +argocd app sync countly-bootstrap +``` + +Then inspect the generated customer apps: + +```bash +kubectl get applications -n argocd | grep +``` + +If needed, sync apps one by one: + +```bash +argocd app sync -cluster-secret-store +argocd app sync -external-secrets +argocd app sync -mongodb +argocd app sync -clickhouse +argocd app sync -kafka +argocd app sync -countly +``` + +## Step 9: Verify That Secrets Landed + +Check ExternalSecrets: + +```bash +kubectl get externalsecrets.external-secrets.io -n countly +kubectl get externalsecrets.external-secrets.io -n kafka +kubectl get externalsecrets.external-secrets.io -n clickhouse +kubectl get externalsecrets.external-secrets.io -n mongodb +``` + +Check the created Kubernetes secrets: + +```bash +kubectl get secret -n countly +kubectl get secret -n kafka +kubectl get secret -n clickhouse +kubectl get secret -n mongodb +``` + +If you want to inspect only one customer's secret-related resources, these are useful: + +```bash +kubectl get externalsecrets.external-secrets.io -n countly +kubectl get externalsecrets.external-secrets.io -n kafka +kubectl get externalsecrets.external-secrets.io -n clickhouse +kubectl get 
externalsecrets.external-secrets.io -n mongodb + +kubectl describe clustersecretstores.external-secrets.io gcp-secrets +kubectl describe externalsecret -n countly countly-common +kubectl describe externalsecret -n countly countly-clickhouse +kubectl describe externalsecret -n countly countly-mongodb +kubectl describe externalsecret -n kafka clickhouse-auth +``` + +## Step 10: Verify The Workloads + +Check pods: + +```bash +kubectl get pods -n countly +kubectl get pods -n kafka +kubectl get pods -n clickhouse +kubectl get pods -n mongodb +``` + +Check ingress: + +```bash +kubectl get ingress -n countly +curl -Ik https:// +``` + +## Switching Between Direct Values And Secret Manager + +This is meant to be easy. + +### To Move From Direct Values To Secret Manager + +1. Create the secrets in Google Secret Manager. +2. Change `secrets.mode: values` to `secrets.mode: externalSecret`. +3. Add the matching `remoteRefs`. +4. Commit and sync. + +### To Move From Secret Manager Back To Direct Values + +1. Put the values back into the `secrets-*.yaml` files. +2. Change `secrets.mode: externalSecret` to `secrets.mode: values`. +3. Remove the `remoteRefs`. +4. Commit and sync. + +The charts are designed so this is a values change, not a template rewrite. 
+ +## Troubleshooting + +### `ClusterSecretStore` says `InvalidProviderConfig` + +Usually means: +- Workload Identity is not enabled +- node pool metadata mode is wrong +- GCP service account binding is wrong + +Check: + +```bash +kubectl describe clustersecretstores.external-secrets.io gcp-secrets +kubectl get sa -n external-secrets external-secrets -o yaml +kubectl logs -n external-secrets deploy/external-secrets +``` + +### `ExternalSecret` says secret does not exist + +Usually means: +- the secret name in `remoteRefs` is wrong +- the secret exists in the wrong GCP project +- the GCP service account cannot read it + +Check: + +```bash +gcloud secrets list --project= +kubectl describe externalsecret -n +``` + +### Argo CD UI shows too many apps and it is hard to focus on one customer + +Use the customer slug as your filter. + +Examples: + +```bash +kubectl get applications -n argocd | grep '^northstar-' +argocd app get northstar-cluster-secret-store +argocd app get northstar-external-secrets +argocd app get northstar-kafka +argocd app get northstar-countly +``` + +If you are debugging one customer, do not scan the whole dashboard. +Filter down to that one slug first. + +### One customer is broken but others are healthy + +That usually means the shared templates are fine and the customer-specific inputs are wrong. 
+ +Check these first: +- `argocd/customers/.yaml` +- `environments//global.yaml` +- `environments//secrets-*.yaml` +- the Secret Manager secret names for that customer +- the Argo destination server for that customer + +This is a good quick drill: + +```bash +argocd app get -cluster-secret-store +argocd app get -external-secrets +argocd app get -mongodb +argocd app get -clickhouse +argocd app get -kafka +argocd app get -countly +``` + +If all other customers are fine and only one is broken, assume: +- wrong customer metadata +- wrong GSM secret names +- wrong IAM / Workload Identity for that cluster +- wrong customer-specific overrides + +before assuming the shared chart templates are broken. + +### Secret creation fails with location policy errors + +Use: + +```bash +gcloud secrets create \ + --replication-policy=user-managed \ + --locations=europe-west1 +``` + +### Argo app is on the right revision but old ExternalSecret specs still exist + +Delete the stale `ExternalSecret` objects and resync the app. + +### Kafka is degraded with `Pod is unschedulable or is not starting` + +This is usually capacity or topology, not Argo. + +Check: + +```bash +kubectl get pods -n kafka -o wide +kubectl get events -n kafka --sort-by=.lastTimestamp | tail -50 +kubectl get nodes +``` + +## Multi-Customer Rule Of Thumb + +For every new customer, keep this structure: +- one customer metadata file +- one environment folder +- one cluster +- one GCP service account +- one set of GSM secrets + +Do not share customer passwords across customers. +Do not reuse one customer secret name for another customer. + +## What To Do Next + +Once this guide is in place, the next normal step is: + +1. scaffold the customer +2. choose secret mode +3. fill metadata +4. create secrets if using GSM +5. commit +6. sync `countly-bootstrap` +7. 
verify the generated apps diff --git a/argocd/README.md index 924d575..489c0f7 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -11,6 +11,8 @@ The short version: 5. Sync `countly-bootstrap`. 6. Argo CD creates the per-customer apps automatically. +For a slower, step-by-step walkthrough, see [ONBOARDING.md](ONBOARDING.md). + ## Folder Overview - `root-application.yaml` @@ -150,6 +152,26 @@ For external secret deployments: - use your external secret setup instead of committing direct values - set `gcpServiceAccountEmail` in the customer metadata so the per-customer External Secrets operator can use Workload Identity - for GAR image pulls, store Docker config JSON in Google Secret Manager and point `global.imagePullSecretExternalSecret.remoteRef.key` to that secret +- use the flat secret naming convention `<customer>-<component>-<secret>` + +Recommended secret names: + +- `<customer>-gar-dockerconfig` +- `<customer>-countly-encryption-reports-key` +- `<customer>-countly-web-session-secret` +- `<customer>-countly-password-secret` +- `<customer>-countly-clickhouse-password` +- `<customer>-countly-mongodb-password` +- `<customer>-kafka-connect-clickhouse-password` +- `<customer>-clickhouse-default-user-password` +- `<customer>-mongodb-admin-password` +- `<customer>-mongodb-app-password` +- `<customer>-mongodb-metrics-password` + +Note: +- existing customer environments may still use older secret names +- use the new convention for all new customers +- migrate older customers only as a planned change ## Important Rules diff --git a/docs/SECRET-MANAGEMENT.md b/docs/SECRET-MANAGEMENT.md index 879fd27..89dba18 100644 --- a/docs/SECRET-MANAGEMENT.md +++ b/docs/SECRET-MANAGEMENT.md @@ -54,21 +54,28 @@ secrets: kind: ClusterSecretStore remoteRefs: common: - encryptionReportsKey: "countly/encryption-reports-key" - webSessionSecret: "countly/web-session-secret" - passwordSecret: "countly/password-secret" + encryptionReportsKey: "acme-countly-encryption-reports-key" + webSessionSecret: "acme-countly-web-session-secret" + passwordSecret: 
"acme-countly-password-secret" clickhouse: - url: "countly/clickhouse-url" - username: "countly/clickhouse-username" - password: "countly/clickhouse-password" - database: "countly/clickhouse-database" - kafka: - brokers: "countly/kafka-brokers" - securityProtocol: "countly/kafka-security-protocol" + password: "acme-countly-clickhouse-password" mongodb: - connectionString: "countly/mongodb-connection-string" + password: "acme-countly-mongodb-password" ``` +Recommended naming convention: +- `-gar-dockerconfig` +- `-countly-encryption-reports-key` +- `-countly-web-session-secret` +- `-countly-password-secret` +- `-countly-clickhouse-password` +- `-countly-mongodb-password` +- `-kafka-connect-clickhouse-password` +- `-clickhouse-default-user-password` +- `-mongodb-admin-password` +- `-mongodb-app-password` +- `-mongodb-metrics-password` + ## Required Secrets All secrets are required on first install. On upgrades, existing values are preserved automatically. diff --git a/environments/gcr-argo/external-secrets.example.yaml b/environments/gcr-argo/external-secrets.example.yaml index dfd38d4..1d4a964 100644 --- a/environments/gcr-argo/external-secrets.example.yaml +++ b/environments/gcr-argo/external-secrets.example.yaml @@ -2,30 +2,64 @@ # External Secrets Operator (ESO) Configuration Example # ============================================================================= # When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in environments//countly.yaml: +# in the chart-specific files under environments//: # -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: my-secret-store -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "countly/encryption-reports-key" -# webSessionSecret: "countly/web-session-secret" -# passwordSecret: "countly/password-secret" -# clickhouse: -# url: "countly/clickhouse-url" -# username: "countly/clickhouse-username" -# password: 
"countly/clickhouse-password" -# database: "countly/clickhouse-database" -# kafka: -# brokers: "countly/kafka-brokers" -# securityProtocol: "countly/kafka-security-protocol" -# mongodb: -# connectionString: "countly/mongodb-connection-string" +# environments//secrets-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-countly-mongodb-password" +# +# environments//secrets-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//secrets-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//secrets-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" # # For GAR image pulls, configure this in environments//global.yaml: # @@ -39,7 +73,7 @@ # name: gcp-secrets # kind: ClusterSecretStore # remoteRef: -# key: "customers/acme/gar-dockerconfig" +# key: "acme-gar-dockerconfig" # # Prerequisites: # 1. 
Install External Secrets Operator: https://external-secrets.io/ diff --git a/environments/reference/external-secrets.example.yaml b/environments/reference/external-secrets.example.yaml index 54776b4..cb9fd43 100644 --- a/environments/reference/external-secrets.example.yaml +++ b/environments/reference/external-secrets.example.yaml @@ -14,19 +14,13 @@ # kind: ClusterSecretStore # remoteRefs: # common: -# encryptionReportsKey: "customers/acme/countly/encryption-reports-key" -# webSessionSecret: "customers/acme/countly/web-session-secret" -# passwordSecret: "customers/acme/countly/password-secret" +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" # clickhouse: -# url: "customers/acme/countly/clickhouse-url" -# username: "customers/acme/countly/clickhouse-username" -# password: "customers/acme/countly/clickhouse-password" -# database: "customers/acme/countly/clickhouse-database" -# kafka: -# brokers: "customers/acme/countly/kafka-brokers" -# securityProtocol: "customers/acme/countly/kafka-security-protocol" +# password: "acme-countly-clickhouse-password" # mongodb: -# connectionString: "customers/acme/countly/mongodb-connection-string" +# password: "acme-countly-mongodb-password" # # environments//secrets-clickhouse.yaml # secrets: @@ -37,7 +31,7 @@ # name: gcp-secrets # kind: ClusterSecretStore # remoteRefs: -# defaultUserPassword: "customers/acme/clickhouse/default-user-password" +# defaultUserPassword: "acme-clickhouse-default-user-password" # # environments//secrets-kafka.yaml # secrets: @@ -49,8 +43,7 @@ # kind: ClusterSecretStore # remoteRefs: # clickhouse: -# username: "customers/acme/kafka-connect/clickhouse-username" -# password: "customers/acme/kafka-connect/clickhouse-password" +# password: "acme-kafka-connect-clickhouse-password" # # environments//secrets-mongodb.yaml # secrets: @@ -62,11 +55,11 @@ # kind: ClusterSecretStore # remoteRefs: # 
admin: -# password: "customers/acme/mongodb/admin-password" +# password: "acme-mongodb-admin-password" # app: -# password: "customers/acme/mongodb/app-password" +# password: "acme-mongodb-app-password" # metrics: -# password: "customers/acme/mongodb/metrics-password" +# password: "acme-mongodb-metrics-password" # # For GAR image pulls, configure this in environments//global.yaml: # @@ -80,7 +73,7 @@ # name: gcp-secrets # kind: ClusterSecretStore # remoteRef: -# key: "customers/acme/gar-dockerconfig" +# key: "acme-gar-dockerconfig" # # Prerequisites: # 1. Install External Secrets Operator: https://external-secrets.io/ diff --git a/environments/reference/secrets-clickhouse.yaml b/environments/reference/secrets-clickhouse.yaml index e008a75..03933c9 100644 --- a/environments/reference/secrets-clickhouse.yaml +++ b/environments/reference/secrets-clickhouse.yaml @@ -15,4 +15,4 @@ auth: # name: gcp-secrets # kind: ClusterSecretStore # remoteRefs: -# defaultUserPassword: "customers/acme/clickhouse/default-user-password" +# defaultUserPassword: "acme-clickhouse-default-user-password" diff --git a/environments/reference/secrets-countly.yaml b/environments/reference/secrets-countly.yaml index 025a845..bc71814 100644 --- a/environments/reference/secrets-countly.yaml +++ b/environments/reference/secrets-countly.yaml @@ -25,16 +25,10 @@ secrets: # kind: ClusterSecretStore # remoteRefs: # common: -# encryptionReportsKey: "customers/acme/countly/encryption-reports-key" -# webSessionSecret: "customers/acme/countly/web-session-secret" -# passwordSecret: "customers/acme/countly/password-secret" +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" # clickhouse: -# url: "customers/acme/countly/clickhouse-url" -# username: "customers/acme/countly/clickhouse-username" -# password: "customers/acme/countly/clickhouse-password" -# database: 
"customers/acme/countly/clickhouse-database" -# kafka: -# brokers: "customers/acme/countly/kafka-brokers" -# securityProtocol: "customers/acme/countly/kafka-security-protocol" +# password: "acme-countly-clickhouse-password" # mongodb: -# connectionString: "customers/acme/countly/mongodb-connection-string" +# password: "acme-countly-mongodb-password" diff --git a/environments/reference/secrets-kafka.yaml b/environments/reference/secrets-kafka.yaml index bd5fdae..44d1609 100644 --- a/environments/reference/secrets-kafka.yaml +++ b/environments/reference/secrets-kafka.yaml @@ -16,5 +16,4 @@ kafkaConnect: # kind: ClusterSecretStore # remoteRefs: # clickhouse: -# username: "customers/acme/kafka-connect/clickhouse-username" -# password: "customers/acme/kafka-connect/clickhouse-password" +# password: "acme-kafka-connect-clickhouse-password" diff --git a/environments/reference/secrets-mongodb.yaml b/environments/reference/secrets-mongodb.yaml index a33e7cc..f6ce392 100644 --- a/environments/reference/secrets-mongodb.yaml +++ b/environments/reference/secrets-mongodb.yaml @@ -22,8 +22,8 @@ users: # kind: ClusterSecretStore # remoteRefs: # admin: -# password: "customers/acme/mongodb/admin-password" +# password: "acme-mongodb-admin-password" # app: -# password: "customers/acme/mongodb/app-password" +# password: "acme-mongodb-app-password" # metrics: -# password: "customers/acme/mongodb/metrics-password" +# password: "acme-mongodb-metrics-password" diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index 034184f..219d668 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -150,5 +150,6 @@ Next: 1. Fill in environments/${customer}/secrets-*.yaml 2. Set argocd/customers/${customer}.yaml GCP and cluster metadata for External Secrets 3. Review environments/${customer}/*.yaml for customer-specific overrides - 4. Commit and sync countly-bootstrap + 4. Create Secret Manager secrets using the ${customer}-- convention + 5. 
Commit and sync countly-bootstrap EOF From 60d224a6d1b9f3cd2510388ed7122475a037f5e5 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 14:03:31 +0530 Subject: [PATCH 47/79] Document image sources and ownership --- README.md | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/README.md b/README.md index 6971511..0f05a39 100644 --- a/README.md +++ b/README.md @@ -210,6 +210,40 @@ Then: 2. commit 3. sync `countly-bootstrap` +## Image Sources + +This table shows which images are used by the platform, where they are pulled from, and whether they are Countly-provided or official upstream/vendor images. + +| Component | Image / Pattern | Source Registry | Ownership | Private/GAR Ready | +|-------|-------|-------|-------|-------| +| Countly app pods (`api`, `frontend`, `ingestor`, `aggregator`, `jobserver`) | `gcr.io/countly-dev-313620/countly-unified:26.01` or `/countly-unified` | `gcr.io` or `us-docker.pkg.dev` | Countly-provided | Yes | +| Kafka Connect ClickHouse | `gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi` or `/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi` | `gcr.io` or `us-docker.pkg.dev` | Countly-provided custom image | Yes | +| ClickHouse server | `clickhouse/clickhouse-server:26.2` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | +| ClickHouse keeper | `clickhouse/clickhouse-keeper:26.2` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | +| MongoDB database | chosen by MongoDB Kubernetes Operator from `version: 8.2.5` | operator-resolved upstream image | Official provider image | No, not via current chart values | +| MongoDB exporter | `percona/mongodb_exporter:0.40.0` | Docker Hub style namespace | Official provider/vendor image | No | +| Migration service | `countly/migration:` | configurable, default public-style repo | Countly-provided | Not wired to GAR automatically | +| Prometheus | 
`prom/prometheus:v3.10.0` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Loki | `grafana/loki:3.6.7` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Tempo | `grafana/tempo:2.10.1` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Pyroscope | `grafana/pyroscope:1.18.1` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Grafana | `grafana/grafana:12.4.0` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Alloy / Alloy OTLP / Alloy Metrics | `grafana/alloy:v1.13.2` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| kube-state-metrics | `registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.18.0` | `registry.k8s.io` | Official provider image | Only via `global.imageRegistry` mirror | +| node-exporter | `prom/node-exporter:v1.10.2` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| busybox init/test containers | `busybox:1.35` | Docker Hub | Official provider image | No explicit mirror logic | + +Operator and platform apps are pinned by Helm chart version in `argocd/operators/`, so this repo controls the chart source and version, but not every underlying container image directly: + +| Operator/App | Source | Version | Ownership | +|-------|-------|-------|-------| +| cert-manager | Jetstack chart | `v1.17.2` | Official provider | +| External Secrets Operator | external-secrets chart | `1.3.1` | Official provider | +| Strimzi Kafka Operator | Strimzi chart | `0.51.0` | Official provider | +| ClickHouse Operator | GHCR OCI chart | `0.0.2` | Official provider | +| MongoDB Kubernetes Operator | MongoDB chart | `1.7.0` | Official provider | +| F5 NGINX Ingress | NGINX chart | `2.1.0` | Official provider | + ### Manual 
Installation (without Helmfile) ```bash From df59c07a2aebfdd76e23d06b7050b993d2ac7d4b Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 14:08:02 +0530 Subject: [PATCH 48/79] Fix Helm-only deployment documentation --- README.md | 1 + docs/DEPLOYING.md | 29 +++++++++++++++++++++++++++-- docs/QUICKSTART.md | 42 +++++++++++++++++++++++++++++++++++++----- 3 files changed, 65 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 0f05a39..5e3763b 100644 --- a/README.md +++ b/README.md @@ -285,6 +285,7 @@ helm install countly-observability ./charts/countly-observability -n observabili -f environments/my-deployment/observability.yaml # Optional: MongoDB to ClickHouse batch migration (includes bundled Redis) +helm dependency build ./charts/countly-migration helm install countly-migration ./charts/countly-migration -n countly-migration --create-namespace \ --wait --timeout 5m \ -f environments/my-deployment/migration.yaml \ diff --git a/docs/DEPLOYING.md b/docs/DEPLOYING.md index ee6a1d0..afd92fa 100644 --- a/docs/DEPLOYING.md +++ b/docs/DEPLOYING.md @@ -38,7 +38,9 @@ See [DEPLOYMENT-MODES.md](DEPLOYMENT-MODES.md) for all mode options. ## Step 3: Configure Secrets -Fill in the required passwords in per-chart secret files (`secrets-.yaml`), which are gitignored to prevent accidental credential commits. Every chart needs credentials on first install: +Your copied environment already includes the chart-specific secret files from `environments/reference/`. + +Fill in the required passwords in the per-chart secret files (`secrets-.yaml`). 
Every chart needs credentials on first install: | Secret File | Required Secrets | |-------------|-----------------| @@ -47,7 +49,7 @@ Fill in the required passwords in per-chart secret files (`secrets-.yaml` | `secrets-clickhouse.yaml` | `auth.defaultUserPassword.password` | | `secrets-kafka.yaml` | `kafkaConnect.clickhouse.password` | -See `environments/reference/secrets.example.yaml` for a complete template you can copy. +See `environments/reference/secrets.example.yaml` for a complete reference. **Important:** The ClickHouse password must match across `secrets-countly.yaml`, `secrets-clickhouse.yaml`, and `secrets-kafka.yaml`. The MongoDB password must match across `secrets-countly.yaml` and `secrets-mongodb.yaml`. @@ -71,6 +73,8 @@ helmfile -e my-deployment apply This installs all charts in dependency order with a 10-minute timeout per chart. +If you prefer plain Helm instead of Helmfile, use the same values layering shown in [README.md](/Users/admin/cly/helm/README.md) under manual installation. + ## Step 5: Verify ```bash @@ -102,6 +106,27 @@ Note: PVCs are not deleted by default. Clean up manually if needed. ## Troubleshooting +### Helmfile is not installed locally + +If `helmfile` is not available on your machine, either: + +- install Helmfile and keep using this document, or +- run plain `helm install` / `helm upgrade` commands using the same values files + +The charts themselves do not require Argo CD. + +### Migration chart fails with missing `redis` dependency + +The `countly-migration` chart includes a Redis dependency. + +Before rendering or installing it directly with Helm, run: + +```bash +helm dependency build ./charts/countly-migration +``` + +If you are not deploying migration, you can ignore this. + ### Kafka startup: UNKNOWN_TOPIC_OR_PARTITION errors On a fresh deployment, Countly pods (aggregator, ingestor) may log `UNKNOWN_TOPIC_OR_PARTITION` errors for the first 2-5 minutes. 
This is expected behavior: diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md index 4a7faa0..7cf85cc 100644 --- a/docs/QUICKSTART.md +++ b/docs/QUICKSTART.md @@ -60,6 +60,22 @@ environments/local/.yaml # Environment choices (ingress environments/local/secrets-.yaml # Credentials (gitignored) ``` +Important: +- the repo does not ship real local secret files +- create them before installing by copying from `environments/reference/` + +Recommended setup: + +```bash +cp environments/reference/secrets-countly.yaml environments/local/secrets-countly.yaml +cp environments/reference/secrets-mongodb.yaml environments/local/secrets-mongodb.yaml +cp environments/reference/secrets-clickhouse.yaml environments/local/secrets-clickhouse.yaml +cp environments/reference/secrets-kafka.yaml environments/local/secrets-kafka.yaml +cp environments/reference/secrets-observability.yaml environments/local/secrets-observability.yaml +``` + +Then fill in the required passwords. + ## Install Charts Run from the `helm/` directory. Order matters — each chart must complete before the next starts. 
@@ -72,6 +88,7 @@ helm install countly-mongodb ./charts/countly-mongodb \ --wait --timeout 10m \ -f environments/local/global.yaml \ -f profiles/sizing/local/mongodb.yaml \ + -f profiles/security/open/mongodb.yaml \ -f environments/local/mongodb.yaml \ -f environments/local/secrets-mongodb.yaml ``` @@ -84,6 +101,7 @@ helm install countly-clickhouse ./charts/countly-clickhouse \ --wait --timeout 10m \ -f environments/local/global.yaml \ -f profiles/sizing/local/clickhouse.yaml \ + -f profiles/security/open/clickhouse.yaml \ -f environments/local/clickhouse.yaml \ -f environments/local/secrets-clickhouse.yaml ``` @@ -96,6 +114,9 @@ helm install countly-kafka ./charts/countly-kafka \ --wait --timeout 10m \ -f environments/local/global.yaml \ -f profiles/sizing/local/kafka.yaml \ + -f profiles/kafka-connect/balanced/kafka.yaml \ + -f profiles/observability/full/kafka.yaml \ + -f profiles/security/open/kafka.yaml \ -f environments/local/kafka.yaml \ -f environments/local/secrets-kafka.yaml ``` @@ -108,6 +129,9 @@ helm install countly ./charts/countly \ --wait --timeout 10m \ -f environments/local/global.yaml \ -f profiles/sizing/local/countly.yaml \ + -f profiles/tls/selfSigned/countly.yaml \ + -f profiles/observability/full/countly.yaml \ + -f profiles/security/open/countly.yaml \ -f environments/local/countly.yaml \ -f environments/local/secrets-countly.yaml ``` @@ -120,6 +144,8 @@ helm install countly-observability ./charts/countly-observability \ --wait --timeout 10m \ -f environments/local/global.yaml \ -f profiles/sizing/local/observability.yaml \ + -f profiles/observability/full/observability.yaml \ + -f profiles/security/open/observability.yaml \ -f environments/local/observability.yaml \ -f environments/local/secrets-observability.yaml ``` @@ -141,6 +167,12 @@ Grafana: https://grafana.local Replace `helm install` with `helm upgrade` (same flags, omit `--create-namespace`). 
+If you install the optional migration chart directly, first run: + +```bash +helm dependency build ./charts/countly-migration +``` + ## Uninstall Reverse order: @@ -165,11 +197,11 @@ environments/local/ clickhouse.yaml # ServiceMonitor disabled (no Prometheus Operator CRD) kafka.yaml # JMX metrics disabled (KafkaNodePool CRD limitation) observability.yaml # mode: full, Grafana ingress (grafana.local, selfSigned TLS) - secrets-countly.yaml # App secrets (encryption key, session, password) - secrets-mongodb.yaml # MongoDB user passwords - secrets-clickhouse.yaml # ClickHouse default password - secrets-kafka.yaml # Kafka Connect ClickHouse password - secrets-observability.yaml # Empty stub (no secrets needed) + secrets-countly.yaml # Create from environments/reference/ + secrets-mongodb.yaml # Create from environments/reference/ + secrets-clickhouse.yaml # Create from environments/reference/ + secrets-kafka.yaml # Create from environments/reference/ + secrets-observability.yaml # Create from environments/reference/ ``` ## Known Issues (Local) From cf94b7f8f78b23655a29fbcf5b62a872e883b47c Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 14:43:02 +0530 Subject: [PATCH 49/79] Use public Kafka Connect image --- README.md | 2 +- charts/countly-kafka/values.yaml | 4 ++-- environments/gcr-argo/kafka.yaml | 19 +++---------------- environments/local/kafka.yaml | 4 ++-- environments/reference/kafka.yaml | 4 ++-- 5 files changed, 10 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 5e3763b..7685368 100644 --- a/README.md +++ b/README.md @@ -217,7 +217,7 @@ This table shows which images are used by the platform, where they are pulled fr | Component | Image / Pattern | Source Registry | Ownership | Private/GAR Ready | |-------|-------|-------|-------|-------| | Countly app pods (`api`, `frontend`, `ingestor`, `aggregator`, `jobserver`) | `gcr.io/countly-dev-313620/countly-unified:26.01` or `/countly-unified` | `gcr.io` or `us-docker.pkg.dev` | 
Countly-provided | Yes | -| Kafka Connect ClickHouse | `gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi` or `/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi` | `gcr.io` or `us-docker.pkg.dev` | Countly-provided custom image | Yes | +| Kafka Connect ClickHouse | `countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0` or `/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0` | Docker Hub or `us-docker.pkg.dev` | Countly-provided custom image | Yes | | ClickHouse server | `clickhouse/clickhouse-server:26.2` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | | ClickHouse keeper | `clickhouse/clickhouse-keeper:26.2` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | | MongoDB database | chosen by MongoDB Kubernetes Operator from `version: 8.2.5` | operator-resolved upstream image | Official provider image | No, not via current chart values | diff --git a/charts/countly-kafka/values.yaml b/charts/countly-kafka/values.yaml index 6e54a9d..fe3e781 100644 --- a/charts/countly-kafka/values.yaml +++ b/charts/countly-kafka/values.yaml @@ -115,8 +115,8 @@ cruiseControl: kafkaConnect: enabled: true name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" - artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" + image: "countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" + artifactImage: "strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" replicas: 2 bootstrapServers: "" resources: diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml index 919b3d8..860b540 100644 --- a/environments/gcr-argo/kafka.yaml +++ b/environments/gcr-argo/kafka.yaml @@ -3,20 +3,7 @@ global: imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: 
us-docker.pkg.dev/countly-01/countly-unified + mode: direct imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRef: - key: customers-gcr-argo-gar-dockerconfig - imagePullSecrets: - - name: countly-registry - -kafkaConnect: - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" - artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" + enabled: false + imagePullSecrets: [] diff --git a/environments/local/kafka.yaml b/environments/local/kafka.yaml index 6315304..fd7a6d9 100644 --- a/environments/local/kafka.yaml +++ b/environments/local/kafka.yaml @@ -4,8 +4,8 @@ # Use OTel-enabled image (includes /opt/otel/opentelemetry-javaagent.jar) kafkaConnect: - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-otel-strimzi-amd64" - artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-otel-strimzi-amd64" + image: "countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" + artifactImage: "strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" otel: enabled: true resourceAttributes: "service.namespace=countly,deployment.environment=local" diff --git a/environments/reference/kafka.yaml b/environments/reference/kafka.yaml index 13d11a0..2f5911c 100644 --- a/environments/reference/kafka.yaml +++ b/environments/reference/kafka.yaml @@ -138,8 +138,8 @@ cruiseControl: kafkaConnect: enabled: true name: connect-ch - image: "gcr.io/countly-dev-313620/strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" - artifactImage: "strimzi/kafka-connect-clickhouse:4.2.0-1.3.5-strimzi" + image: "countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" + artifactImage: "strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" replicas: 2 bootstrapServers: "" # Auto-derived from cluster if empty From 
cebddc7b2b03c7a83ed8a3169f213f52aa758158 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 15:34:35 +0530 Subject: [PATCH 50/79] Merge mixed ExternalSecret template data --- .../templates/external-secret-clickhouse-connect.yaml | 1 + charts/countly/templates/external-secret-clickhouse.yaml | 1 + charts/countly/templates/external-secret-common.yaml | 1 + charts/countly/templates/external-secret-kafka.yaml | 1 + 4 files changed, 4 insertions(+) diff --git a/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml b/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml index 3414d88..ff8af55 100644 --- a/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml +++ b/charts/countly-kafka/templates/external-secret-clickhouse-connect.yaml @@ -17,6 +17,7 @@ spec: creationPolicy: Owner template: engineVersion: v2 + mergePolicy: Merge data: {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.username }} username: {{ .Values.kafkaConnect.clickhouse.username | quote }} diff --git a/charts/countly/templates/external-secret-clickhouse.yaml b/charts/countly/templates/external-secret-clickhouse.yaml index ea5fab3..a2fc537 100644 --- a/charts/countly/templates/external-secret-clickhouse.yaml +++ b/charts/countly/templates/external-secret-clickhouse.yaml @@ -20,6 +20,7 @@ spec: creationPolicy: Owner template: engineVersion: v2 + mergePolicy: Merge data: {{- if not .Values.secrets.externalSecret.remoteRefs.clickhouse.url }} COUNTLY_CONFIG__CLICKHOUSE_URL: {{ include "countly.clickhouse.url" . 
| quote }} diff --git a/charts/countly/templates/external-secret-common.yaml b/charts/countly/templates/external-secret-common.yaml index 38cb29a..3d9c7cc 100644 --- a/charts/countly/templates/external-secret-common.yaml +++ b/charts/countly/templates/external-secret-common.yaml @@ -25,6 +25,7 @@ spec: {{- if $hasCommonTemplateData }} template: engineVersion: v2 + mergePolicy: Merge data: {{- if not .Values.secrets.externalSecret.remoteRefs.common.encryptionReportsKey }} COUNTLY_CONFIG__ENCRYPTION_REPORTS_KEY: {{ .Values.secrets.common.encryptionReportsKey | quote }} diff --git a/charts/countly/templates/external-secret-kafka.yaml b/charts/countly/templates/external-secret-kafka.yaml index 8c6f2ec..0f4ab58 100644 --- a/charts/countly/templates/external-secret-kafka.yaml +++ b/charts/countly/templates/external-secret-kafka.yaml @@ -23,6 +23,7 @@ spec: creationPolicy: Owner template: engineVersion: v2 + mergePolicy: Merge data: {{- if not .Values.secrets.externalSecret.remoteRefs.kafka.brokers }} COUNTLY_CONFIG__KAFKA_RDKAFKA_BROKERS: {{ include "countly.kafka.brokers" . 
| quote }} From 62ba37cd5b20adbe7fe86c3084f4f16aff2651c0 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 16:08:26 +0530 Subject: [PATCH 51/79] Align official image versions --- README.md | 22 +++++++++---------- charts/countly-clickhouse/Chart.yaml | 2 +- charts/countly-clickhouse/values.yaml | 2 +- charts/countly-mongodb/values.yaml | 2 +- charts/countly-observability/README.md | 14 ++++++------ .../templates/loki/statefulset.yaml | 2 +- .../templates/prometheus/statefulset.yaml | 2 +- .../templates/pyroscope/statefulset.yaml | 2 +- .../templates/tempo/statefulset.yaml | 2 +- .../templates/tests/test-backends.yaml | 10 ++++----- charts/countly-observability/values.yaml | 18 +++++++-------- environments/reference/clickhouse.yaml | 2 +- environments/reference/mongodb.yaml | 2 +- environments/reference/observability.yaml | 18 +++++++-------- 14 files changed, 50 insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index 7685368..48206bb 100644 --- a/README.md +++ b/README.md @@ -218,20 +218,20 @@ This table shows which images are used by the platform, where they are pulled fr |-------|-------|-------|-------|-------| | Countly app pods (`api`, `frontend`, `ingestor`, `aggregator`, `jobserver`) | `gcr.io/countly-dev-313620/countly-unified:26.01` or `/countly-unified` | `gcr.io` or `us-docker.pkg.dev` | Countly-provided | Yes | | Kafka Connect ClickHouse | `countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0` or `/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0` | Docker Hub or `us-docker.pkg.dev` | Countly-provided custom image | Yes | -| ClickHouse server | `clickhouse/clickhouse-server:26.2` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | -| ClickHouse keeper | `clickhouse/clickhouse-keeper:26.2` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | +| ClickHouse server | 
`clickhouse/clickhouse-server:26.3` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | +| ClickHouse keeper | `clickhouse/clickhouse-keeper:26.3` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | | MongoDB database | chosen by MongoDB Kubernetes Operator from `version: 8.2.5` | operator-resolved upstream image | Official provider image | No, not via current chart values | -| MongoDB exporter | `percona/mongodb_exporter:0.40.0` | Docker Hub style namespace | Official provider/vendor image | No | +| MongoDB exporter | `percona/mongodb_exporter:0.47.2` | Docker Hub style namespace | Official provider/vendor image | No | | Migration service | `countly/migration:` | configurable, default public-style repo | Countly-provided | Not wired to GAR automatically | -| Prometheus | `prom/prometheus:v3.10.0` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | -| Loki | `grafana/loki:3.6.7` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | -| Tempo | `grafana/tempo:2.10.1` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | -| Pyroscope | `grafana/pyroscope:1.18.1` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | -| Grafana | `grafana/grafana:12.4.0` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | -| Alloy / Alloy OTLP / Alloy Metrics | `grafana/alloy:v1.13.2` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | -| kube-state-metrics | `registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.18.0` | `registry.k8s.io` | Official provider image | Only via `global.imageRegistry` mirror | +| Prometheus | `prom/prometheus:v3.8.1` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | 
+| Loki | `grafana/loki:3.6.3` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Tempo | `grafana/tempo:2.8.1` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Pyroscope | `grafana/pyroscope:1.16.0` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Grafana | `grafana/grafana:12.3.5` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| Alloy / Alloy OTLP / Alloy Metrics | `grafana/alloy:v1.14.0` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | +| kube-state-metrics | `registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.17.0` | `registry.k8s.io` | Official provider image | Only via `global.imageRegistry` mirror | | node-exporter | `prom/node-exporter:v1.10.2` | Docker Hub style namespace | Official provider image | Only via `global.imageRegistry` mirror | -| busybox init/test containers | `busybox:1.35` | Docker Hub | Official provider image | No explicit mirror logic | +| busybox init/test containers | `busybox:1.37.0` | Docker Hub | Official provider image | No explicit mirror logic | Operator and platform apps are pinned by Helm chart version in `argocd/operators/`, so this repo controls the chart source and version, but not every underlying container image directly: diff --git a/charts/countly-clickhouse/Chart.yaml b/charts/countly-clickhouse/Chart.yaml index 221a694..f995595 100644 --- a/charts/countly-clickhouse/Chart.yaml +++ b/charts/countly-clickhouse/Chart.yaml @@ -3,7 +3,7 @@ name: countly-clickhouse description: ClickHouse for Countly analytics via ClickHouse Operator type: application version: 0.1.0 -appVersion: "26.2" +appVersion: "26.3" home: https://countly.com icon: https://count.ly/images/logos/countly-logo.svg sources: diff --git a/charts/countly-clickhouse/values.yaml 
b/charts/countly-clickhouse/values.yaml index 53316a3..4f109ad 100644 --- a/charts/countly-clickhouse/values.yaml +++ b/charts/countly-clickhouse/values.yaml @@ -18,7 +18,7 @@ argocd: clickhouseOperator: apiVersion: clickhouse.com/v1alpha1 -version: "26.2" +version: "26.3" shards: 1 replicas: 2 diff --git a/charts/countly-mongodb/values.yaml b/charts/countly-mongodb/values.yaml index a68b859..5c1fbe3 100644 --- a/charts/countly-mongodb/values.yaml +++ b/charts/countly-mongodb/values.yaml @@ -86,7 +86,7 @@ users: exporter: enabled: true - image: percona/mongodb_exporter:0.40.0 + image: percona/mongodb_exporter:0.47.2 port: 9216 resources: requests: diff --git a/charts/countly-observability/README.md b/charts/countly-observability/README.md index a248252..3cabbeb 100644 --- a/charts/countly-observability/README.md +++ b/charts/countly-observability/README.md @@ -276,13 +276,13 @@ grafana: | Component | Image | Version | |---|---|---| -| Prometheus | `prom/prometheus` | v3.10.0 | -| Grafana | `grafana/grafana` | 12.4.0 | -| Loki | `grafana/loki` | 3.6.7 | -| Tempo | `grafana/tempo` | 2.10.1 | -| Pyroscope | `grafana/pyroscope` | 1.18.1 | -| Alloy | `grafana/alloy` | v1.13.2 | -| kube-state-metrics | `registry.k8s.io/kube-state-metrics/kube-state-metrics` | v2.18.0 | +| Prometheus | `prom/prometheus` | v3.8.1 | +| Grafana | `grafana/grafana` | 12.3.5 | +| Loki | `grafana/loki` | 3.6.3 | +| Tempo | `grafana/tempo` | 2.8.1 | +| Pyroscope | `grafana/pyroscope` | 1.16.0 | +| Alloy | `grafana/alloy` | v1.14.0 | +| kube-state-metrics | `registry.k8s.io/kube-state-metrics/kube-state-metrics` | v2.17.0 | | node-exporter | `prom/node-exporter` | v1.10.2 | --- diff --git a/charts/countly-observability/templates/loki/statefulset.yaml b/charts/countly-observability/templates/loki/statefulset.yaml index c295619..dc0ef64 100644 --- a/charts/countly-observability/templates/loki/statefulset.yaml +++ b/charts/countly-observability/templates/loki/statefulset.yaml @@ -25,7 +25,7 @@ spec: 
spec: initContainers: - name: init-data-dir - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'mkdir -p /loki/chunks /loki/rules /loki/tsdb-shipper-cache /loki/wal && chown -R 10001:10001 /loki && chmod -R 755 /loki'] volumeMounts: - name: data diff --git a/charts/countly-observability/templates/prometheus/statefulset.yaml b/charts/countly-observability/templates/prometheus/statefulset.yaml index 42320a0..282e7a4 100644 --- a/charts/countly-observability/templates/prometheus/statefulset.yaml +++ b/charts/countly-observability/templates/prometheus/statefulset.yaml @@ -26,7 +26,7 @@ spec: serviceAccountName: {{ include "obs.fullname" . }}-prometheus initContainers: - name: init-data-dir - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'mkdir -p /prometheus && chown -R 65534:65534 /prometheus && chmod -R 755 /prometheus'] volumeMounts: - name: data diff --git a/charts/countly-observability/templates/pyroscope/statefulset.yaml b/charts/countly-observability/templates/pyroscope/statefulset.yaml index b5db317..65866e6 100644 --- a/charts/countly-observability/templates/pyroscope/statefulset.yaml +++ b/charts/countly-observability/templates/pyroscope/statefulset.yaml @@ -25,7 +25,7 @@ spec: spec: initContainers: - name: init-data-dir - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'mkdir -p /data && chown -R 10001:10001 /data && chmod -R 755 /data'] volumeMounts: - name: data diff --git a/charts/countly-observability/templates/tempo/statefulset.yaml b/charts/countly-observability/templates/tempo/statefulset.yaml index 18fff84..d8b94c0 100644 --- a/charts/countly-observability/templates/tempo/statefulset.yaml +++ b/charts/countly-observability/templates/tempo/statefulset.yaml @@ -25,7 +25,7 @@ spec: spec: initContainers: - name: init-data-dir - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'mkdir -p /var/tempo/traces /var/tempo/wal /var/tempo/generator/wal /var/tempo/generator/traces && chown -R 
10001:10001 /var/tempo && chmod -R 755 /var/tempo'] volumeMounts: - name: data diff --git a/charts/countly-observability/templates/tests/test-backends.yaml b/charts/countly-observability/templates/tests/test-backends.yaml index 7941108..610893a 100644 --- a/charts/countly-observability/templates/tests/test-backends.yaml +++ b/charts/countly-observability/templates/tests/test-backends.yaml @@ -13,7 +13,7 @@ spec: restartPolicy: Never containers: - name: test - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'wget -qO- --timeout=10 http://{{ include "obs.fullname" . }}-prometheus:9090/-/ready'] --- {{- end }} @@ -32,7 +32,7 @@ spec: restartPolicy: Never containers: - name: test - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'wget -qO- --timeout=10 http://{{ include "obs.fullname" . }}-loki:3100/ready'] --- {{- end }} @@ -51,7 +51,7 @@ spec: restartPolicy: Never containers: - name: test - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'wget -qO- --timeout=10 http://{{ include "obs.fullname" . }}-tempo:3200/ready'] --- {{- end }} @@ -70,7 +70,7 @@ spec: restartPolicy: Never containers: - name: test - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'wget -qO- --timeout=10 http://{{ include "obs.fullname" . }}-grafana:3000/api/health'] --- {{- end }} @@ -89,6 +89,6 @@ spec: restartPolicy: Never containers: - name: test - image: busybox:1.35 + image: busybox:1.37.0 command: ['sh', '-c', 'wget -qO- --timeout=10 http://{{ include "obs.fullname" . 
}}-alloy-otlp:12345/-/ready'] {{- end }} diff --git a/charts/countly-observability/values.yaml b/charts/countly-observability/values.yaml index 5929a36..a114393 100644 --- a/charts/countly-observability/values.yaml +++ b/charts/countly-observability/values.yaml @@ -83,7 +83,7 @@ profiling: prometheus: image: repository: prom/prometheus - tag: "v3.10.0" + tag: "v3.8.1" retention: time: "30d" size: "50GB" @@ -112,7 +112,7 @@ prometheus: loki: image: repository: grafana/loki - tag: "3.6.7" + tag: "3.6.3" retention: "30d" storage: backend: "filesystem" # filesystem | s3 | gcs | azure @@ -158,7 +158,7 @@ loki: tempo: image: repository: grafana/tempo - tag: "2.10.1" + tag: "2.8.1" retention: "12h" storage: backend: "local" # local | s3 | gcs | azure @@ -203,7 +203,7 @@ tempo: pyroscope: image: repository: grafana/pyroscope - tag: "1.18.1" + tag: "1.16.0" retention: "72h" storage: backend: "filesystem" # filesystem | s3 | gcs | azure | swift @@ -242,7 +242,7 @@ grafana: enabled: true image: repository: grafana/grafana - tag: "12.4.0" + tag: "12.3.5" admin: # -- Use an existing Secret for admin credentials existingSecret: "" @@ -287,7 +287,7 @@ grafana: alloy: image: repository: grafana/alloy - tag: "v1.13.2" + tag: "v1.14.0" resources: requests: cpu: "500m" @@ -306,7 +306,7 @@ alloy: alloyOtlp: image: repository: grafana/alloy - tag: "v1.13.2" + tag: "v1.14.0" replicas: 1 resources: requests: @@ -329,7 +329,7 @@ alloyOtlp: alloyMetrics: image: repository: grafana/alloy - tag: "v1.13.2" + tag: "v1.14.0" replicas: 1 resources: requests: @@ -352,7 +352,7 @@ kubeStateMetrics: enabled: true image: repository: registry.k8s.io/kube-state-metrics/kube-state-metrics - tag: "v2.18.0" + tag: "v2.17.0" resources: requests: cpu: "10m" diff --git a/environments/reference/clickhouse.yaml b/environments/reference/clickhouse.yaml index d7899d3..22b2187 100644 --- a/environments/reference/clickhouse.yaml +++ b/environments/reference/clickhouse.yaml @@ -27,7 +27,7 @@ clickhouseOperator: # 
============================================================================= # Cluster Topology # ============================================================================= -version: "26.2" +version: "26.3" shards: 1 replicas: 2 diff --git a/environments/reference/mongodb.yaml b/environments/reference/mongodb.yaml index 31f230e..5631bd2 100644 --- a/environments/reference/mongodb.yaml +++ b/environments/reference/mongodb.yaml @@ -90,7 +90,7 @@ users: # ============================================================================= exporter: enabled: true - image: percona/mongodb_exporter:0.40.0 + image: percona/mongodb_exporter:0.47.2 port: 9216 resources: requests: diff --git a/environments/reference/observability.yaml b/environments/reference/observability.yaml index 79a4b39..43b25e9 100644 --- a/environments/reference/observability.yaml +++ b/environments/reference/observability.yaml @@ -84,7 +84,7 @@ profiling: prometheus: image: repository: prom/prometheus - tag: "v3.10.0" + tag: "v3.8.1" retention: time: "30d" size: "50GB" @@ -112,7 +112,7 @@ prometheus: loki: image: repository: grafana/loki - tag: "3.6.7" + tag: "3.6.3" retention: "30d" storage: backend: "filesystem" # filesystem | s3 | gcs | azure @@ -188,7 +188,7 @@ loki: tempo: image: repository: grafana/tempo - tag: "2.10.1" + tag: "2.8.1" retention: "12h" storage: backend: "local" # local | s3 | gcs | azure @@ -231,7 +231,7 @@ tempo: pyroscope: image: repository: grafana/pyroscope - tag: "1.18.1" + tag: "1.16.0" retention: "72h" storage: backend: "filesystem" # filesystem | s3 | gcs | azure | swift @@ -268,7 +268,7 @@ grafana: enabled: true # Only deployed when mode == "full" image: repository: grafana/grafana - tag: "12.4.0" + tag: "12.3.5" admin: existingSecret: "" # Use an existing Secret for admin credentials userKey: "admin-user" @@ -310,7 +310,7 @@ grafana: alloy: image: repository: grafana/alloy - tag: "v1.13.2" + tag: "v1.14.0" resources: requests: cpu: "500m" @@ -329,7 +329,7 @@ alloy: 
alloyOtlp: image: repository: grafana/alloy - tag: "v1.13.2" + tag: "v1.14.0" replicas: 1 resources: requests: @@ -351,7 +351,7 @@ alloyOtlp: alloyMetrics: image: repository: grafana/alloy - tag: "v1.13.2" + tag: "v1.14.0" replicas: 1 resources: requests: @@ -374,7 +374,7 @@ kubeStateMetrics: enabled: true image: repository: registry.k8s.io/kube-state-metrics/kube-state-metrics - tag: "v2.18.0" + tag: "v2.17.0" resources: requests: cpu: "10m" From 3bf53ccf3be24cf0f817e4e832045a692a1d03aa Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 17:43:19 +0530 Subject: [PATCH 52/79] Add v2-argo customer --- argocd/customers/v2-argo.yaml | 16 ++++ environments/v2-argo/README.md | 77 +++++++++++++++++ environments/v2-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 +++++++ environments/v2-argo/countly-tls.env | 7 ++ environments/v2-argo/countly.yaml | 2 + .../v2-argo/external-secrets.example.yaml | 84 +++++++++++++++++++ environments/v2-argo/global.yaml | 39 +++++++++ .../v2-argo/image-pull-secrets.example.yaml | 41 +++++++++ environments/v2-argo/kafka.yaml | 2 + environments/v2-argo/migration.yaml | 1 + environments/v2-argo/mongodb.yaml | 2 + environments/v2-argo/observability.yaml | 1 + environments/v2-argo/secrets.example.yaml | 62 ++++++++++++++ .../v2-argo/secrets.sops.example.yaml | 21 +++++ 15 files changed, 388 insertions(+) create mode 100644 argocd/customers/v2-argo.yaml create mode 100644 environments/v2-argo/README.md create mode 100644 environments/v2-argo/clickhouse.yaml create mode 100644 environments/v2-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/v2-argo/countly-tls.env create mode 100644 environments/v2-argo/countly.yaml create mode 100644 environments/v2-argo/external-secrets.example.yaml create mode 100644 environments/v2-argo/global.yaml create mode 100644 environments/v2-argo/image-pull-secrets.example.yaml create mode 100644 environments/v2-argo/kafka.yaml create mode 100644 
environments/v2-argo/migration.yaml create mode 100644 environments/v2-argo/mongodb.yaml create mode 100644 environments/v2-argo/observability.yaml create mode 100644 environments/v2-argo/secrets.example.yaml create mode 100644 environments/v2-argo/secrets.sops.example.yaml diff --git a/argocd/customers/v2-argo.yaml b/argocd/customers/v2-argo.yaml new file mode 100644 index 0000000..5e5b1f2 --- /dev/null +++ b/argocd/customers/v2-argo.yaml @@ -0,0 +1,16 @@ +customer: v2-argo +environment: v2-argo +project: countly-customers +server: https://34.123.21.39 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: v2-argo +clusterLocation: us-central1-a +hostname: v2-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/v2-argo/README.md b/environments/v2-argo/README.md new file mode 100644 index 0000000..cceff05 --- /dev/null +++ b/environments/v2-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. 
Fill in required secrets in the chart-specific files: + - `secrets-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `secrets-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `secrets-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `secrets-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) +- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `secrets-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. 
+ +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `secrets-mongodb.yaml` | MongoDB user passwords | +| `secrets-clickhouse.yaml` | ClickHouse auth password | +| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | +| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/v2-argo/clickhouse.yaml b/environments/v2-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/v2-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/v2-argo/cluster-secret-store.gcp.example.yaml b/environments/v2-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/v2-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. +# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/v2-argo/countly-tls.env b/environments/v2-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/v2-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline at end of file diff --git a/environments/v2-argo/countly.yaml b/environments/v2-argo/countly.yaml new file mode 100644 index 0000000..b71d75e --- /dev/null +++ 
b/environments/v2-argo/countly.yaml @@ -0,0 +1,2 @@ +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. diff --git a/environments/v2-argo/external-secrets.example.yaml b/environments/v2-argo/external-secrets.example.yaml new file mode 100644 index 0000000..cb9fd43 --- /dev/null +++ b/environments/v2-argo/external-secrets.example.yaml @@ -0,0 +1,84 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific secrets files under environments//: +# +# environments//secrets-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-countly-mongodb-password" +# +# environments//secrets-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//secrets-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//secrets-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets 
+# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. diff --git a/environments/v2-argo/global.yaml b/environments/v2-argo/global.yaml new file mode 100644 index 0000000..0141b88 --- /dev/null +++ b/environments/v2-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRef: + key: v2-argo-gar-dockerconfig + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: v2-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git 
a/environments/v2-argo/image-pull-secrets.example.yaml b/environments/v2-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/v2-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). +# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/v2-argo/kafka.yaml b/environments/v2-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/v2-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. 
diff --git a/environments/v2-argo/migration.yaml b/environments/v2-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/v2-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/v2-argo/mongodb.yaml b/environments/v2-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/v2-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/v2-argo/observability.yaml b/environments/v2-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/v2-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. diff --git a/environments/v2-argo/secrets.example.yaml b/environments/v2-argo/secrets.example.yaml new file mode 100644 index 0000000..07af390 --- /dev/null +++ b/environments/v2-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). 
+# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//secrets-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +secrets: + mode: values +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/v2-argo/secrets.sops.example.yaml b/environments/v2-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..9b652d1 --- /dev/null +++ b/environments/v2-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops 
--encrypt --in-place environments//secrets-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//secrets-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). +# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 541e8780ef79424d7cbeac95910035cd5eb2faee Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 17:51:39 +0530 Subject: [PATCH 53/79] Reuse GAR pull secret for v2-argo --- environments/v2-argo/global.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/v2-argo/global.yaml b/environments/v2-argo/global.yaml index 0141b88..bcdf9ff 100644 --- a/environments/v2-argo/global.yaml +++ b/environments/v2-argo/global.yaml @@ -21,7 +21,7 @@ global: name: gcp-secrets kind: ClusterSecretStore remoteRef: - key: v2-argo-gar-dockerconfig + key: customers-gcr-argo-gar-dockerconfig storageClass: "" imagePullSecrets: - name: countly-registry From d7255fc277664603532746317d90c98c8b304816 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 18:12:14 +0530 Subject: [PATCH 54/79] Rename customer secret files to credentials --- .gitignore | 6 ---- README.md | 16 +++++----- argocd/ONBOARDING.md | 20 ++++++------- argocd/README.md | 14 ++++----- argocd/applicationsets/00-mongodb.yaml | 2 +- argocd/applicationsets/01-clickhouse.yaml | 2 +- argocd/applicationsets/02-kafka.yaml | 2 +- argocd/applicationsets/03-countly.yaml | 2 +- argocd/applicationsets/04-observability.yaml | 2 +- argocd/applicationsets/05-migration.yaml | 2 +- .../templates/app-clickhouse.yaml | 2 +- .../countly-argocd/templates/app-countly.yaml | 2 +- 
.../countly-argocd/templates/app-kafka.yaml | 2 +- .../templates/app-migration.yaml | 2 +- .../countly-argocd/templates/app-mongodb.yaml | 2 +- .../templates/app-observability.yaml | 2 +- docs/DEPLOYING.md | 10 +++---- docs/QUICKSTART.md | 30 +++++++++---------- environments/gcr-argo/README.md | 12 ++++---- ...house.yaml => credentials-clickhouse.yaml} | 0 ...-countly.yaml => credentials-countly.yaml} | 0 ...rets-kafka.yaml => credentials-kafka.yaml} | 0 ...ration.yaml => credentials-migration.yaml} | 0 ...-mongodb.yaml => credentials-mongodb.yaml} | 0 ...ty.yaml => credentials-observability.yaml} | 0 .../gcr-argo/external-secrets.example.yaml | 8 ++--- environments/gcr-argo/secrets.example.yaml | 8 ++--- .../gcr-argo/secrets.sops.example.yaml | 4 +-- environments/local/clickhouse.yaml | 2 +- environments/local/kafka.yaml | 2 +- environments/local/mongodb.yaml | 2 +- environments/reference/README.md | 22 +++++++------- ...house.yaml => credentials-clickhouse.yaml} | 2 +- ...-countly.yaml => credentials-countly.yaml} | 4 +-- ...rets-kafka.yaml => credentials-kafka.yaml} | 0 ...ration.yaml => credentials-migration.yaml} | 0 ...-mongodb.yaml => credentials-mongodb.yaml} | 2 +- ...ty.yaml => credentials-observability.yaml} | 0 .../reference/external-secrets.example.yaml | 8 ++--- environments/reference/secrets.example.yaml | 8 ++--- .../reference/secrets.sops.example.yaml | 4 +-- environments/v2-argo/README.md | 22 +++++++------- .../v2-argo/credentials-clickhouse.yaml | 10 +++++++ environments/v2-argo/credentials-countly.yaml | 22 ++++++++++++++ environments/v2-argo/credentials-kafka.yaml | 11 +++++++ .../v2-argo/credentials-migration.yaml | 2 ++ environments/v2-argo/credentials-mongodb.yaml | 21 +++++++++++++ .../v2-argo/credentials-observability.yaml | 2 ++ .../v2-argo/external-secrets.example.yaml | 8 ++--- environments/v2-argo/secrets.example.yaml | 8 ++--- .../v2-argo/secrets.sops.example.yaml | 4 +-- helmfile.yaml.gotmpl | 12 ++++---- 
scripts/new-argocd-customer.sh | 2 +- 53 files changed, 197 insertions(+), 135 deletions(-) rename environments/gcr-argo/{secrets-clickhouse.yaml => credentials-clickhouse.yaml} (100%) rename environments/gcr-argo/{secrets-countly.yaml => credentials-countly.yaml} (100%) rename environments/gcr-argo/{secrets-kafka.yaml => credentials-kafka.yaml} (100%) rename environments/gcr-argo/{secrets-migration.yaml => credentials-migration.yaml} (100%) rename environments/gcr-argo/{secrets-mongodb.yaml => credentials-mongodb.yaml} (100%) rename environments/gcr-argo/{secrets-observability.yaml => credentials-observability.yaml} (100%) rename environments/reference/{secrets-clickhouse.yaml => credentials-clickhouse.yaml} (79%) rename environments/reference/{secrets-countly.yaml => credentials-countly.yaml} (86%) rename environments/reference/{secrets-kafka.yaml => credentials-kafka.yaml} (100%) rename environments/reference/{secrets-migration.yaml => credentials-migration.yaml} (100%) rename environments/reference/{secrets-mongodb.yaml => credentials-mongodb.yaml} (87%) rename environments/reference/{secrets-observability.yaml => credentials-observability.yaml} (100%) create mode 100644 environments/v2-argo/credentials-clickhouse.yaml create mode 100644 environments/v2-argo/credentials-countly.yaml create mode 100644 environments/v2-argo/credentials-kafka.yaml create mode 100644 environments/v2-argo/credentials-migration.yaml create mode 100644 environments/v2-argo/credentials-mongodb.yaml create mode 100644 environments/v2-argo/credentials-observability.yaml diff --git a/.gitignore b/.gitignore index 1406193..5c88993 100644 --- a/.gitignore +++ b/.gitignore @@ -8,12 +8,6 @@ overlay-secrets.yaml *-secrets.yaml secrets-*.yaml -# Exception: reference environment templates (contain no real secrets) -!environments/reference/secrets-*.yaml - -# Temporary test customer exception: allow gcr-argo secrets to be committed -!environments/gcr-argo/secrets-*.yaml - # Helmfile state 
helmfile.lock .helmfile/ diff --git a/README.md b/README.md index 48206bb..03f10e7 100644 --- a/README.md +++ b/README.md @@ -159,7 +159,7 @@ Install required operators before deploying Countly. See [docs/PREREQUISITES.md] - Keep `global.imageSource.mode: direct` for the current direct-pull flow, or switch to `gcpArtifactRegistry` and set `global.imageSource.gcpArtifactRegistry.repositoryPrefix` - Set `global.imagePullSecrets` when pulling from a private registry such as GAR -3. **Fill in required secrets** in the chart-specific files. See `environments/reference/secrets.example.yaml` for a complete reference. +3. **Fill in required credentials** in the chart-specific files. See `environments/reference/secrets.example.yaml` for a complete reference. Keep `secrets.mode: values` for direct YAML values, switch to `secrets.mode: externalSecret` to have the charts create `ExternalSecret` resources backed by your Secret Manager store. 4. **Register your environment** in `helmfile.yaml.gotmpl`: @@ -178,7 +178,7 @@ Install required operators before deploying Countly. See [docs/PREREQUISITES.md] For a GAR-backed production example, see [environments/example-production/global.yaml](/Users/admin/cly/helm/environments/example-production/global.yaml) and replace `countly-gar` with your Kubernetes docker-registry secret name. For GitOps-managed pull secrets, start from [environments/reference/image-pull-secrets.example.yaml](/Users/admin/cly/helm/environments/reference/image-pull-secrets.example.yaml) and encrypt or template it before committing. For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly and Kafka Connect each create their own namespaced `dockerconfigjson` pull secret. 
-Application secrets can use the same pattern in `secrets-countly.yaml`, `secrets-kafka.yaml`, `secrets-clickhouse.yaml`, and `secrets-mongodb.yaml` by switching `secrets.mode` to `externalSecret` and filling `secrets.externalSecret.remoteRefs`. +Application secrets can use the same pattern in `credentials-countly.yaml`, `credentials-kafka.yaml`, `credentials-clickhouse.yaml`, and `credentials-mongodb.yaml` by switching `secrets.mode` to `externalSecret` and filling `secrets.externalSecret.remoteRefs`. Recommended Secret Manager naming convention: - `-gar-dockerconfig` @@ -206,7 +206,7 @@ This creates: - `argocd/customers/.yaml` Then: -1. fill in `environments//secrets-*.yaml` +1. fill in `environments//credentials-*.yaml` 2. commit 3. sync `countly-bootstrap` @@ -252,14 +252,14 @@ helm install countly-mongodb ./charts/countly-mongodb -n mongodb --create-namesp -f environments/my-deployment/global.yaml \ -f profiles/sizing/production/mongodb.yaml \ -f environments/my-deployment/mongodb.yaml \ - -f environments/my-deployment/secrets-mongodb.yaml + -f environments/my-deployment/credentials-mongodb.yaml helm install countly-clickhouse ./charts/countly-clickhouse -n clickhouse --create-namespace \ --wait --timeout 10m \ -f environments/my-deployment/global.yaml \ -f profiles/sizing/production/clickhouse.yaml \ -f environments/my-deployment/clickhouse.yaml \ - -f environments/my-deployment/secrets-clickhouse.yaml + -f environments/my-deployment/credentials-clickhouse.yaml helm install countly-kafka ./charts/countly-kafka -n kafka --create-namespace \ --wait --timeout 10m \ @@ -267,7 +267,7 @@ helm install countly-kafka ./charts/countly-kafka -n kafka --create-namespace \ -f profiles/sizing/production/kafka.yaml \ -f profiles/kafka-connect/balanced/kafka.yaml \ -f environments/my-deployment/kafka.yaml \ - -f environments/my-deployment/secrets-kafka.yaml + -f environments/my-deployment/credentials-kafka.yaml helm install countly ./charts/countly -n countly 
--create-namespace \ --wait --timeout 10m \ @@ -275,7 +275,7 @@ helm install countly ./charts/countly -n countly --create-namespace \ -f profiles/sizing/production/countly.yaml \ -f profiles/tls/letsencrypt/countly.yaml \ -f environments/my-deployment/countly.yaml \ - -f environments/my-deployment/secrets-countly.yaml + -f environments/my-deployment/credentials-countly.yaml helm install countly-observability ./charts/countly-observability -n observability --create-namespace \ --wait --timeout 10m \ @@ -289,7 +289,7 @@ helm dependency build ./charts/countly-migration helm install countly-migration ./charts/countly-migration -n countly-migration --create-namespace \ --wait --timeout 5m \ -f environments/my-deployment/migration.yaml \ - -f environments/my-deployment/secrets-migration.yaml + -f environments/my-deployment/credentials-migration.yaml ``` ## Configuration Model diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index 4cd3d07..6f908ad 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -264,10 +264,10 @@ Use this when: What to do: - keep `secrets.mode: values` - fill the passwords directly in: - - `environments//secrets-countly.yaml` - - `environments//secrets-kafka.yaml` - - `environments//secrets-clickhouse.yaml` - - `environments//secrets-mongodb.yaml` + - `environments//credentials-countly.yaml` + - `environments//credentials-kafka.yaml` + - `environments//credentials-clickhouse.yaml` + - `environments//credentials-mongodb.yaml` ### Option B: Secret Manager @@ -425,7 +425,7 @@ Create one secret at a time if you are debugging. It is easier to spot mistakes. 
### Countly File: -- `environments/reference/secrets-countly.yaml` +- `environments/reference/credentials-countly.yaml` Secret Manager mode example: @@ -455,7 +455,7 @@ secrets: ### Kafka File: -- `environments/reference/secrets-kafka.yaml` +- `environments/reference/credentials-kafka.yaml` ```yaml secrets: @@ -472,7 +472,7 @@ secrets: ### ClickHouse File: -- `environments/reference/secrets-clickhouse.yaml` +- `environments/reference/credentials-clickhouse.yaml` ```yaml secrets: @@ -488,7 +488,7 @@ secrets: ### MongoDB File: -- `environments/reference/secrets-mongodb.yaml` +- `environments/reference/credentials-mongodb.yaml` ```yaml secrets: @@ -605,7 +605,7 @@ This is meant to be easy. ### To Move From Secret Manager Back To Direct Values -1. Put the values back into the `secrets-*.yaml` files. +1. Put the values back into the `credentials-*.yaml` files. 2. Change `secrets.mode: externalSecret` to `secrets.mode: values`. 3. Remove the `remoteRefs`. 4. Commit and sync. @@ -667,7 +667,7 @@ That usually means the shared templates are fine and the customer-specific input Check these first: - `argocd/customers/.yaml` - `environments//global.yaml` -- `environments//secrets-*.yaml` +- `environments//credentials-*.yaml` - the Secret Manager secret names for that customer - the Argo destination server for that customer diff --git a/argocd/README.md b/argocd/README.md index 489c0f7..b8a9f6e 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -134,12 +134,12 @@ migration: disabled Files to review: -- `environments//secrets-countly.yaml` -- `environments//secrets-clickhouse.yaml` -- `environments//secrets-kafka.yaml` -- `environments//secrets-mongodb.yaml` -- `environments//secrets-observability.yaml` -- `environments//secrets-migration.yaml` +- `environments//credentials-countly.yaml` +- `environments//credentials-clickhouse.yaml` +- `environments//credentials-kafka.yaml` +- `environments//credentials-mongodb.yaml` +- `environments//credentials-observability.yaml` +- 
`environments//credentials-migration.yaml` For direct-value deployments: @@ -293,7 +293,7 @@ For each new customer: 1. Register the cluster in Argo CD. 2. Run the scaffold script. 3. Edit `argocd/customers/.yaml`. -4. Fill in `environments//secrets-*.yaml`. +4. Fill in `environments//credentials-*.yaml`. 5. Review `environments//kafka.yaml` if migration is disabled. 6. Commit and push. 7. Sync `countly-bootstrap`. diff --git a/argocd/applicationsets/00-mongodb.yaml b/argocd/applicationsets/00-mongodb.yaml index b2e4e10..b9de322 100644 --- a/argocd/applicationsets/00-mongodb.yaml +++ b/argocd/applicationsets/00-mongodb.yaml @@ -31,7 +31,7 @@ spec: - "../../profiles/sizing/{{ .sizing }}/mongodb.yaml" - "../../profiles/security/{{ .security }}/mongodb.yaml" - "../../environments/{{ .environment }}/mongodb.yaml" - - "../../environments/{{ .environment }}/secrets-mongodb.yaml" + - "../../environments/{{ .environment }}/credentials-mongodb.yaml" parameters: - name: argocd.enabled value: "true" diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml index 655b96b..b745de1 100644 --- a/argocd/applicationsets/01-clickhouse.yaml +++ b/argocd/applicationsets/01-clickhouse.yaml @@ -31,7 +31,7 @@ spec: - "../../profiles/sizing/{{ .sizing }}/clickhouse.yaml" - "../../profiles/security/{{ .security }}/clickhouse.yaml" - "../../environments/{{ .environment }}/clickhouse.yaml" - - "../../environments/{{ .environment }}/secrets-clickhouse.yaml" + - "../../environments/{{ .environment }}/credentials-clickhouse.yaml" parameters: - name: argocd.enabled value: "true" diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index 6809a49..2e22f8c 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -33,7 +33,7 @@ spec: - "../../profiles/observability/{{ .observability }}/kafka.yaml" - "../../profiles/security/{{ .security }}/kafka.yaml" - "../../environments/{{ 
.environment }}/kafka.yaml" - - "../../environments/{{ .environment }}/secrets-kafka.yaml" + - "../../environments/{{ .environment }}/credentials-kafka.yaml" parameters: - name: argocd.enabled value: "true" diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index e90ee9f..f01023d 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -33,7 +33,7 @@ spec: - "../../profiles/observability/{{ .observability }}/countly.yaml" - "../../profiles/security/{{ .security }}/countly.yaml" - "../../environments/{{ .environment }}/countly.yaml" - - "../../environments/{{ .environment }}/secrets-countly.yaml" + - "../../environments/{{ .environment }}/credentials-countly.yaml" parameters: - name: argocd.enabled value: "true" diff --git a/argocd/applicationsets/04-observability.yaml b/argocd/applicationsets/04-observability.yaml index 4e7deff..5303ca7 100644 --- a/argocd/applicationsets/04-observability.yaml +++ b/argocd/applicationsets/04-observability.yaml @@ -32,7 +32,7 @@ spec: - "../../profiles/observability/{{ .observability }}/observability.yaml" - "../../profiles/security/{{ .security }}/observability.yaml" - "../../environments/{{ .environment }}/observability.yaml" - - "../../environments/{{ .environment }}/secrets-observability.yaml" + - "../../environments/{{ .environment }}/credentials-observability.yaml" parameters: - name: argocd.enabled value: "true" diff --git a/argocd/applicationsets/05-migration.yaml b/argocd/applicationsets/05-migration.yaml index 59adc7c..455308d 100644 --- a/argocd/applicationsets/05-migration.yaml +++ b/argocd/applicationsets/05-migration.yaml @@ -29,7 +29,7 @@ spec: valueFiles: - "../../environments/{{ .environment }}/global.yaml" - "../../environments/{{ .environment }}/migration.yaml" - - "../../environments/{{ .environment }}/secrets-migration.yaml" + - "../../environments/{{ .environment }}/credentials-migration.yaml" parameters: - name: argocd.enabled 
value: "true" diff --git a/charts/countly-argocd/templates/app-clickhouse.yaml b/charts/countly-argocd/templates/app-clickhouse.yaml index b164050..fde892f 100644 --- a/charts/countly-argocd/templates/app-clickhouse.yaml +++ b/charts/countly-argocd/templates/app-clickhouse.yaml @@ -23,7 +23,7 @@ spec: - ../../profiles/sizing/{{ .Values.global.sizing }}/clickhouse.yaml - ../../profiles/security/{{ .Values.global.security }}/clickhouse.yaml - ../../environments/{{ .Values.environment }}/clickhouse.yaml - - ../../environments/{{ .Values.environment }}/secrets-clickhouse.yaml + - ../../environments/{{ .Values.environment }}/credentials-clickhouse.yaml parameters: - name: argocd.enabled value: "true" diff --git a/charts/countly-argocd/templates/app-countly.yaml b/charts/countly-argocd/templates/app-countly.yaml index 54c8592..e71346d 100644 --- a/charts/countly-argocd/templates/app-countly.yaml +++ b/charts/countly-argocd/templates/app-countly.yaml @@ -25,7 +25,7 @@ spec: - ../../profiles/observability/{{ .Values.global.observability }}/countly.yaml - ../../profiles/security/{{ .Values.global.security }}/countly.yaml - ../../environments/{{ .Values.environment }}/countly.yaml - - ../../environments/{{ .Values.environment }}/secrets-countly.yaml + - ../../environments/{{ .Values.environment }}/credentials-countly.yaml parameters: - name: argocd.enabled value: "true" diff --git a/charts/countly-argocd/templates/app-kafka.yaml b/charts/countly-argocd/templates/app-kafka.yaml index b373087..9ad325f 100644 --- a/charts/countly-argocd/templates/app-kafka.yaml +++ b/charts/countly-argocd/templates/app-kafka.yaml @@ -25,7 +25,7 @@ spec: - ../../profiles/observability/{{ .Values.global.observability }}/kafka.yaml - ../../profiles/security/{{ .Values.global.security }}/kafka.yaml - ../../environments/{{ .Values.environment }}/kafka.yaml - - ../../environments/{{ .Values.environment }}/secrets-kafka.yaml + - ../../environments/{{ .Values.environment }}/credentials-kafka.yaml 
parameters: - name: argocd.enabled value: "true" diff --git a/charts/countly-argocd/templates/app-migration.yaml b/charts/countly-argocd/templates/app-migration.yaml index 86ae26f..c0d821e 100644 --- a/charts/countly-argocd/templates/app-migration.yaml +++ b/charts/countly-argocd/templates/app-migration.yaml @@ -21,7 +21,7 @@ spec: valueFiles: - ../../environments/{{ .Values.environment }}/global.yaml - ../../environments/{{ .Values.environment }}/migration.yaml - - ../../environments/{{ .Values.environment }}/secrets-migration.yaml + - ../../environments/{{ .Values.environment }}/credentials-migration.yaml parameters: - name: argocd.enabled value: "true" diff --git a/charts/countly-argocd/templates/app-mongodb.yaml b/charts/countly-argocd/templates/app-mongodb.yaml index ce470a3..86460b9 100644 --- a/charts/countly-argocd/templates/app-mongodb.yaml +++ b/charts/countly-argocd/templates/app-mongodb.yaml @@ -23,7 +23,7 @@ spec: - ../../profiles/sizing/{{ .Values.global.sizing }}/mongodb.yaml - ../../profiles/security/{{ .Values.global.security }}/mongodb.yaml - ../../environments/{{ .Values.environment }}/mongodb.yaml - - ../../environments/{{ .Values.environment }}/secrets-mongodb.yaml + - ../../environments/{{ .Values.environment }}/credentials-mongodb.yaml parameters: - name: argocd.enabled value: "true" diff --git a/charts/countly-argocd/templates/app-observability.yaml b/charts/countly-argocd/templates/app-observability.yaml index 27276d7..9876d82 100644 --- a/charts/countly-argocd/templates/app-observability.yaml +++ b/charts/countly-argocd/templates/app-observability.yaml @@ -24,7 +24,7 @@ spec: - ../../profiles/observability/{{ .Values.global.observability }}/observability.yaml - ../../profiles/security/{{ .Values.global.security }}/observability.yaml - ../../environments/{{ .Values.environment }}/observability.yaml - - ../../environments/{{ .Values.environment }}/secrets-observability.yaml + - ../../environments/{{ .Values.environment 
}}/credentials-observability.yaml parameters: - name: argocd.enabled value: "true" diff --git a/docs/DEPLOYING.md b/docs/DEPLOYING.md index afd92fa..9c5594e 100644 --- a/docs/DEPLOYING.md +++ b/docs/DEPLOYING.md @@ -44,14 +44,14 @@ Fill in the required passwords in the per-chart secret files (`secrets-.y | Secret File | Required Secrets | |-------------|-----------------| -| `secrets-countly.yaml` | `secrets.common.*` (3 keys), `secrets.clickhouse.password`, `secrets.mongodb.password` | -| `secrets-mongodb.yaml` | `users.app.password`, `users.metrics.password` | -| `secrets-clickhouse.yaml` | `auth.defaultUserPassword.password` | -| `secrets-kafka.yaml` | `kafkaConnect.clickhouse.password` | +| `credentials-countly.yaml` | `secrets.common.*` (3 keys), `secrets.clickhouse.password`, `secrets.mongodb.password` | +| `credentials-mongodb.yaml` | `users.app.password`, `users.metrics.password` | +| `credentials-clickhouse.yaml` | `auth.defaultUserPassword.password` | +| `credentials-kafka.yaml` | `kafkaConnect.clickhouse.password` | See `environments/reference/secrets.example.yaml` for a complete reference. -**Important:** The ClickHouse password must match across `secrets-countly.yaml`, `secrets-clickhouse.yaml`, and `secrets-kafka.yaml`. The MongoDB password must match across `secrets-countly.yaml` and `secrets-mongodb.yaml`. +**Important:** The ClickHouse password must match across `credentials-countly.yaml`, `credentials-clickhouse.yaml`, and `credentials-kafka.yaml`. The MongoDB password must match across `credentials-countly.yaml` and `credentials-mongodb.yaml`. For production secret management options, see [SECRET-MANAGEMENT.md](SECRET-MANAGEMENT.md). 
diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md index 7cf85cc..9cf3c3c 100644 --- a/docs/QUICKSTART.md +++ b/docs/QUICKSTART.md @@ -67,11 +67,11 @@ Important: Recommended setup: ```bash -cp environments/reference/secrets-countly.yaml environments/local/secrets-countly.yaml -cp environments/reference/secrets-mongodb.yaml environments/local/secrets-mongodb.yaml -cp environments/reference/secrets-clickhouse.yaml environments/local/secrets-clickhouse.yaml -cp environments/reference/secrets-kafka.yaml environments/local/secrets-kafka.yaml -cp environments/reference/secrets-observability.yaml environments/local/secrets-observability.yaml +cp environments/reference/credentials-countly.yaml environments/local/credentials-countly.yaml +cp environments/reference/credentials-mongodb.yaml environments/local/credentials-mongodb.yaml +cp environments/reference/credentials-clickhouse.yaml environments/local/credentials-clickhouse.yaml +cp environments/reference/credentials-kafka.yaml environments/local/credentials-kafka.yaml +cp environments/reference/credentials-observability.yaml environments/local/credentials-observability.yaml ``` Then fill in the required passwords. @@ -90,7 +90,7 @@ helm install countly-mongodb ./charts/countly-mongodb \ -f profiles/sizing/local/mongodb.yaml \ -f profiles/security/open/mongodb.yaml \ -f environments/local/mongodb.yaml \ - -f environments/local/secrets-mongodb.yaml + -f environments/local/credentials-mongodb.yaml ``` ### 2. ClickHouse @@ -103,7 +103,7 @@ helm install countly-clickhouse ./charts/countly-clickhouse \ -f profiles/sizing/local/clickhouse.yaml \ -f profiles/security/open/clickhouse.yaml \ -f environments/local/clickhouse.yaml \ - -f environments/local/secrets-clickhouse.yaml + -f environments/local/credentials-clickhouse.yaml ``` ### 3. 
Kafka @@ -118,7 +118,7 @@ helm install countly-kafka ./charts/countly-kafka \ -f profiles/observability/full/kafka.yaml \ -f profiles/security/open/kafka.yaml \ -f environments/local/kafka.yaml \ - -f environments/local/secrets-kafka.yaml + -f environments/local/credentials-kafka.yaml ``` ### 4. Countly @@ -133,7 +133,7 @@ helm install countly ./charts/countly \ -f profiles/observability/full/countly.yaml \ -f profiles/security/open/countly.yaml \ -f environments/local/countly.yaml \ - -f environments/local/secrets-countly.yaml + -f environments/local/credentials-countly.yaml ``` ### 5. Observability @@ -147,7 +147,7 @@ helm install countly-observability ./charts/countly-observability \ -f profiles/observability/full/observability.yaml \ -f profiles/security/open/observability.yaml \ -f environments/local/observability.yaml \ - -f environments/local/secrets-observability.yaml + -f environments/local/credentials-observability.yaml ``` ## Verify @@ -197,11 +197,11 @@ environments/local/ clickhouse.yaml # ServiceMonitor disabled (no Prometheus Operator CRD) kafka.yaml # JMX metrics disabled (KafkaNodePool CRD limitation) observability.yaml # mode: full, Grafana ingress (grafana.local, selfSigned TLS) - secrets-countly.yaml # Create from environments/reference/ - secrets-mongodb.yaml # Create from environments/reference/ - secrets-clickhouse.yaml # Create from environments/reference/ - secrets-kafka.yaml # Create from environments/reference/ - secrets-observability.yaml # Create from environments/reference/ + credentials-countly.yaml # Create from environments/reference/ + credentials-mongodb.yaml # Create from environments/reference/ + credentials-clickhouse.yaml # Create from environments/reference/ + credentials-kafka.yaml # Create from environments/reference/ + credentials-observability.yaml # Create from environments/reference/ ``` ## Known Issues (Local) diff --git a/environments/gcr-argo/README.md b/environments/gcr-argo/README.md index a2a0335..d764f60 100644 
--- a/environments/gcr-argo/README.md +++ b/environments/gcr-argo/README.md @@ -46,7 +46,7 @@ This directory is a complete starting point for a new Countly deployment. See `secrets.example.yaml` for a complete list of all required secrets. For production, choose one of: -- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) - **existingSecret**: Pre-create Kubernetes secrets and reference them - **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) - **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) @@ -65,11 +65,11 @@ If you use External Secrets Operator with Google Secret Manager, point `global.i | `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | | `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | | `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `secrets-mongodb.yaml` | MongoDB user passwords | -| `secrets-clickhouse.yaml` | ClickHouse auth password | -| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | -| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | | `secrets.example.yaml` | Combined secrets reference (all charts in one file) | | `secrets.sops.example.yaml` | SOPS encryption guide | | 
`external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/gcr-argo/secrets-clickhouse.yaml b/environments/gcr-argo/credentials-clickhouse.yaml similarity index 100% rename from environments/gcr-argo/secrets-clickhouse.yaml rename to environments/gcr-argo/credentials-clickhouse.yaml diff --git a/environments/gcr-argo/secrets-countly.yaml b/environments/gcr-argo/credentials-countly.yaml similarity index 100% rename from environments/gcr-argo/secrets-countly.yaml rename to environments/gcr-argo/credentials-countly.yaml diff --git a/environments/gcr-argo/secrets-kafka.yaml b/environments/gcr-argo/credentials-kafka.yaml similarity index 100% rename from environments/gcr-argo/secrets-kafka.yaml rename to environments/gcr-argo/credentials-kafka.yaml diff --git a/environments/gcr-argo/secrets-migration.yaml b/environments/gcr-argo/credentials-migration.yaml similarity index 100% rename from environments/gcr-argo/secrets-migration.yaml rename to environments/gcr-argo/credentials-migration.yaml diff --git a/environments/gcr-argo/secrets-mongodb.yaml b/environments/gcr-argo/credentials-mongodb.yaml similarity index 100% rename from environments/gcr-argo/secrets-mongodb.yaml rename to environments/gcr-argo/credentials-mongodb.yaml diff --git a/environments/gcr-argo/secrets-observability.yaml b/environments/gcr-argo/credentials-observability.yaml similarity index 100% rename from environments/gcr-argo/secrets-observability.yaml rename to environments/gcr-argo/credentials-observability.yaml diff --git a/environments/gcr-argo/external-secrets.example.yaml b/environments/gcr-argo/external-secrets.example.yaml index 1d4a964..f7beae0 100644 --- a/environments/gcr-argo/external-secrets.example.yaml +++ b/environments/gcr-argo/external-secrets.example.yaml @@ -4,7 +4,7 @@ # When using secrets.mode=externalSecret, configure the ESO remoteRefs # in the chart-specific files under environments//: # -# environments//secrets-countly.yaml +# 
environments//credentials-countly.yaml # secrets: # mode: externalSecret # externalSecret: @@ -22,7 +22,7 @@ # mongodb: # password: "acme-countly-mongodb-password" # -# environments//secrets-kafka.yaml +# environments//credentials-kafka.yaml # secrets: # mode: externalSecret # externalSecret: @@ -34,7 +34,7 @@ # clickhouse: # password: "acme-kafka-connect-clickhouse-password" # -# environments//secrets-clickhouse.yaml +# environments//credentials-clickhouse.yaml # secrets: # mode: externalSecret # externalSecret: @@ -45,7 +45,7 @@ # remoteRefs: # defaultUserPassword: "acme-clickhouse-default-user-password" # -# environments//secrets-mongodb.yaml +# environments//credentials-mongodb.yaml # secrets: # mode: externalSecret # externalSecret: diff --git a/environments/gcr-argo/secrets.example.yaml b/environments/gcr-argo/secrets.example.yaml index 282eb0d..a03c1fc 100644 --- a/environments/gcr-argo/secrets.example.yaml +++ b/environments/gcr-argo/secrets.example.yaml @@ -13,7 +13,7 @@ # - SOPS encryption (see secrets.sops.example.yaml) # ============================================================================= -# --- countly chart (environments//secrets-countly.yaml) --- +# --- countly chart (environments//credentials-countly.yaml) --- secrets: common: encryptionReportsKey: "CHANGEME-min-8-chars" @@ -24,19 +24,19 @@ secrets: mongodb: password: "CHANGEME-match-mongodb-chart" -# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- users: app: password: "CHANGEME-match-secrets.mongodb.password" metrics: password: "CHANGEME-metrics-exporter" -# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- auth: defaultUserPassword: password: "CHANGEME-match-secrets.clickhouse.password" -# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +# --- countly-kafka chart 
(environments//credentials-kafka.yaml) --- kafkaConnect: clickhouse: password: "CHANGEME-match-clickhouse-password" diff --git a/environments/gcr-argo/secrets.sops.example.yaml b/environments/gcr-argo/secrets.sops.example.yaml index 9b652d1..7335b8d 100644 --- a/environments/gcr-argo/secrets.sops.example.yaml +++ b/environments/gcr-argo/secrets.sops.example.yaml @@ -2,11 +2,11 @@ # SOPS Encrypted Secrets Example # ============================================================================= # Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//secrets-countly.yaml +# sops --encrypt --in-place environments//credentials-countly.yaml # # Configure helmfile to decrypt with the helm-secrets plugin: # values: -# - secrets://environments//secrets-countly.yaml +# - secrets://environments//credentials-countly.yaml # # See: https://github.com/jkroepke/helm-secrets # ============================================================================= diff --git a/environments/local/clickhouse.yaml b/environments/local/clickhouse.yaml index 05ac971..3197512 100644 --- a/environments/local/clickhouse.yaml +++ b/environments/local/clickhouse.yaml @@ -1,6 +1,6 @@ # Local environment — ClickHouse chart overrides (non-sizing) # Profile defaults come from profiles/sizing/local/clickhouse.yaml -# Secrets come from secrets-clickhouse.yaml +# Credentials come from credentials-clickhouse.yaml # Enable server-side OpenTelemetry span logging # Queries with W3C traceparent headers will be logged to system.opentelemetry_span_log diff --git a/environments/local/kafka.yaml b/environments/local/kafka.yaml index fd7a6d9..34459c4 100644 --- a/environments/local/kafka.yaml +++ b/environments/local/kafka.yaml @@ -1,6 +1,6 @@ # Local environment — Kafka chart overrides (non-sizing) # Profile defaults come from profiles/sizing/local/kafka.yaml -# Secrets come from secrets-kafka.yaml +# Credentials come from credentials-kafka.yaml # Use OTel-enabled image (includes 
/opt/otel/opentelemetry-javaagent.jar) kafkaConnect: diff --git a/environments/local/mongodb.yaml b/environments/local/mongodb.yaml index d7a4f40..4378233 100644 --- a/environments/local/mongodb.yaml +++ b/environments/local/mongodb.yaml @@ -1,4 +1,4 @@ # Local environment — MongoDB chart overrides (non-sizing) # Profile defaults come from profiles/sizing/local/mongodb.yaml -# Secrets come from secrets-mongodb.yaml +# Credentials come from credentials-mongodb.yaml {} diff --git a/environments/reference/README.md b/environments/reference/README.md index cceff05..65a5fb3 100644 --- a/environments/reference/README.md +++ b/environments/reference/README.md @@ -20,10 +20,10 @@ This directory is a complete starting point for a new Countly deployment. - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` 3. Fill in required secrets in the chart-specific files: - - `secrets-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `secrets-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `secrets-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `secrets-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` Or use `secrets.example.yaml` as a complete reference. @@ -46,9 +46,9 @@ This directory is a complete starting point for a new Countly deployment. See `secrets.example.yaml` for a complete list of all required secrets. 
For production, choose one of: -- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) - **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `secrets-*.yaml` files +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files - **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) For private registries such as GAR, also create namespaced image pull secrets. @@ -65,11 +65,11 @@ If you use External Secrets Operator with Google Secret Manager, point `global.i | `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | | `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | | `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `secrets-mongodb.yaml` | MongoDB user passwords | -| `secrets-clickhouse.yaml` | ClickHouse auth password | -| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | -| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | | `secrets.example.yaml` | Combined secrets reference (all charts in one file) | | `secrets.sops.example.yaml` | SOPS encryption guide | | 
`external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/reference/secrets-clickhouse.yaml b/environments/reference/credentials-clickhouse.yaml similarity index 79% rename from environments/reference/secrets-clickhouse.yaml rename to environments/reference/credentials-clickhouse.yaml index 03933c9..c7a7799 100644 --- a/environments/reference/secrets-clickhouse.yaml +++ b/environments/reference/credentials-clickhouse.yaml @@ -4,7 +4,7 @@ secrets: auth: defaultUserPassword: - password: "" # REQUIRED: must match secrets-countly.yaml secrets.clickhouse.password + password: "" # REQUIRED: must match credentials-countly.yaml secrets.clickhouse.password # For Secret Manager / External Secrets instead of direct values: # secrets: diff --git a/environments/reference/secrets-countly.yaml b/environments/reference/credentials-countly.yaml similarity index 86% rename from environments/reference/secrets-countly.yaml rename to environments/reference/credentials-countly.yaml index bc71814..abe6fb5 100644 --- a/environments/reference/secrets-countly.yaml +++ b/environments/reference/credentials-countly.yaml @@ -8,12 +8,12 @@ secrets: passwordSecret: "" # REQUIRED: min 8 chars clickhouse: username: "default" - password: "" # REQUIRED: must match secrets-clickhouse.yaml + password: "" # REQUIRED: must match credentials-clickhouse.yaml database: "countly_drill" kafka: securityProtocol: "PLAINTEXT" mongodb: - password: "" # REQUIRED: must match secrets-mongodb.yaml + password: "" # REQUIRED: must match credentials-mongodb.yaml # For Secret Manager / External Secrets instead of direct values: # secrets: diff --git a/environments/reference/secrets-kafka.yaml b/environments/reference/credentials-kafka.yaml similarity index 100% rename from environments/reference/secrets-kafka.yaml rename to environments/reference/credentials-kafka.yaml diff --git a/environments/reference/secrets-migration.yaml b/environments/reference/credentials-migration.yaml 
similarity index 100% rename from environments/reference/secrets-migration.yaml rename to environments/reference/credentials-migration.yaml diff --git a/environments/reference/secrets-mongodb.yaml b/environments/reference/credentials-mongodb.yaml similarity index 87% rename from environments/reference/secrets-mongodb.yaml rename to environments/reference/credentials-mongodb.yaml index f6ce392..69b9ff4 100644 --- a/environments/reference/secrets-mongodb.yaml +++ b/environments/reference/credentials-mongodb.yaml @@ -7,7 +7,7 @@ users: enabled: true password: "" # REQUIRED: MongoDB super admin/root-style user app: - password: "" # REQUIRED: must match secrets-countly.yaml secrets.mongodb.password + password: "" # REQUIRED: must match credentials-countly.yaml secrets.mongodb.password metrics: enabled: true password: "" # REQUIRED: metrics exporter password diff --git a/environments/reference/secrets-observability.yaml b/environments/reference/credentials-observability.yaml similarity index 100% rename from environments/reference/secrets-observability.yaml rename to environments/reference/credentials-observability.yaml diff --git a/environments/reference/external-secrets.example.yaml b/environments/reference/external-secrets.example.yaml index cb9fd43..35e59ad 100644 --- a/environments/reference/external-secrets.example.yaml +++ b/environments/reference/external-secrets.example.yaml @@ -4,7 +4,7 @@ # When using secrets.mode=externalSecret, configure the ESO remoteRefs # in the chart-specific secrets files under environments//: # -# environments//secrets-countly.yaml +# environments//credentials-countly.yaml # secrets: # mode: externalSecret # externalSecret: @@ -22,7 +22,7 @@ # mongodb: # password: "acme-countly-mongodb-password" # -# environments//secrets-clickhouse.yaml +# environments//credentials-clickhouse.yaml # secrets: # mode: externalSecret # externalSecret: @@ -33,7 +33,7 @@ # remoteRefs: # defaultUserPassword: "acme-clickhouse-default-user-password" # -# 
environments//secrets-kafka.yaml +# environments//credentials-kafka.yaml # secrets: # mode: externalSecret # externalSecret: @@ -45,7 +45,7 @@ # clickhouse: # password: "acme-kafka-connect-clickhouse-password" # -# environments//secrets-mongodb.yaml +# environments//credentials-mongodb.yaml # secrets: # mode: externalSecret # externalSecret: diff --git a/environments/reference/secrets.example.yaml b/environments/reference/secrets.example.yaml index 07af390..181cb54 100644 --- a/environments/reference/secrets.example.yaml +++ b/environments/reference/secrets.example.yaml @@ -13,7 +13,7 @@ # - SOPS encryption (see secrets.sops.example.yaml) # ============================================================================= -# --- countly chart (environments//secrets-countly.yaml) --- +# --- countly chart (environments//credentials-countly.yaml) --- secrets: mode: values common: @@ -25,7 +25,7 @@ secrets: mongodb: password: "CHANGEME-match-mongodb-chart" -# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- secrets: mode: values users: @@ -37,14 +37,14 @@ users: metrics: password: "CHANGEME-metrics-exporter" -# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- secrets: mode: values auth: defaultUserPassword: password: "CHANGEME-match-secrets.clickhouse.password" -# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +# --- countly-kafka chart (environments//credentials-kafka.yaml) --- secrets: mode: values kafkaConnect: diff --git a/environments/reference/secrets.sops.example.yaml b/environments/reference/secrets.sops.example.yaml index 9b652d1..7335b8d 100644 --- a/environments/reference/secrets.sops.example.yaml +++ b/environments/reference/secrets.sops.example.yaml @@ -2,11 +2,11 @@ # SOPS Encrypted Secrets Example # 
============================================================================= # Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//secrets-countly.yaml +# sops --encrypt --in-place environments//credentials-countly.yaml # # Configure helmfile to decrypt with the helm-secrets plugin: # values: -# - secrets://environments//secrets-countly.yaml +# - secrets://environments//credentials-countly.yaml # # See: https://github.com/jkroepke/helm-secrets # ============================================================================= diff --git a/environments/v2-argo/README.md b/environments/v2-argo/README.md index cceff05..65a5fb3 100644 --- a/environments/v2-argo/README.md +++ b/environments/v2-argo/README.md @@ -20,10 +20,10 @@ This directory is a complete starting point for a new Countly deployment. - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` 3. Fill in required secrets in the chart-specific files: - - `secrets-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `secrets-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `secrets-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `secrets-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` Or use `secrets.example.yaml` as a complete reference. @@ -46,9 +46,9 @@ This directory is a complete starting point for a new Countly deployment. See `secrets.example.yaml` for a complete list of all required secrets. 
For production, choose one of: -- **Direct values**: Fill secrets in chart-specific YAML files (split into `secrets-countly.yaml`, `secrets-mongodb.yaml`, etc.) +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) - **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `secrets-*.yaml` files +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files - **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) For private registries such as GAR, also create namespaced image pull secrets. @@ -65,11 +65,11 @@ If you use External Secrets Operator with Google Secret Manager, point `global.i | `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | | `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | | `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `secrets-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `secrets-mongodb.yaml` | MongoDB user passwords | -| `secrets-clickhouse.yaml` | ClickHouse auth password | -| `secrets-kafka.yaml` | Kafka Connect ClickHouse password | -| `secrets-observability.yaml` | Observability secrets (external backend creds if needed) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | | `secrets.example.yaml` | Combined secrets reference (all charts in one file) | | `secrets.sops.example.yaml` | SOPS encryption guide | | 
`external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/v2-argo/credentials-clickhouse.yaml b/environments/v2-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..92f9f5e --- /dev/null +++ b/environments/v2-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +# ClickHouse secrets from Google Secret Manager via External Secrets Operator +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: v2-argo-clickhouse-default-user-password diff --git a/environments/v2-argo/credentials-countly.yaml b/environments/v2-argo/credentials-countly.yaml new file mode 100644 index 0000000..3dc43c5 --- /dev/null +++ b/environments/v2-argo/credentials-countly.yaml @@ -0,0 +1,22 @@ +# Countly secrets: static values in Git, passwords from Google Secret Manager via External Secrets Operator +secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: v2-argo-countly-encryption-reports-key + webSessionSecret: v2-argo-countly-web-session-secret + passwordSecret: v2-argo-countly-password-secret + clickhouse: + password: v2-argo-countly-clickhouse-password + mongodb: + password: v2-argo-countly-mongodb-password diff --git a/environments/v2-argo/credentials-kafka.yaml b/environments/v2-argo/credentials-kafka.yaml new file mode 100644 index 0000000..737bb5a --- /dev/null +++ b/environments/v2-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +# Kafka Connect secrets from Google Secret Manager via External Secrets Operator +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: 
v2-argo-kafka-connect-clickhouse-password diff --git a/environments/v2-argo/credentials-migration.yaml b/environments/v2-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/v2-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. diff --git a/environments/v2-argo/credentials-mongodb.yaml b/environments/v2-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..10daeba --- /dev/null +++ b/environments/v2-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +# MongoDB secrets from Google Secret Manager via External Secrets Operator +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: v2-argo-mongodb-admin-password + app: + password: v2-argo-mongodb-app-password + metrics: + password: v2-argo-mongodb-metrics-password + +users: + admin: + enabled: true + metrics: + enabled: true diff --git a/environments/v2-argo/credentials-observability.yaml b/environments/v2-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/v2-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/v2-argo/external-secrets.example.yaml b/environments/v2-argo/external-secrets.example.yaml index cb9fd43..35e59ad 100644 --- a/environments/v2-argo/external-secrets.example.yaml +++ b/environments/v2-argo/external-secrets.example.yaml @@ -4,7 +4,7 @@ # When using secrets.mode=externalSecret, configure the ESO remoteRefs # in the chart-specific secrets files under environments//: # -# environments//secrets-countly.yaml +# environments//credentials-countly.yaml # secrets: # mode: externalSecret # externalSecret: @@ -22,7 
+22,7 @@ # mongodb: # password: "acme-countly-mongodb-password" # -# environments//secrets-clickhouse.yaml +# environments//credentials-clickhouse.yaml # secrets: # mode: externalSecret # externalSecret: @@ -33,7 +33,7 @@ # remoteRefs: # defaultUserPassword: "acme-clickhouse-default-user-password" # -# environments//secrets-kafka.yaml +# environments//credentials-kafka.yaml # secrets: # mode: externalSecret # externalSecret: @@ -45,7 +45,7 @@ # clickhouse: # password: "acme-kafka-connect-clickhouse-password" # -# environments//secrets-mongodb.yaml +# environments//credentials-mongodb.yaml # secrets: # mode: externalSecret # externalSecret: diff --git a/environments/v2-argo/secrets.example.yaml b/environments/v2-argo/secrets.example.yaml index 07af390..181cb54 100644 --- a/environments/v2-argo/secrets.example.yaml +++ b/environments/v2-argo/secrets.example.yaml @@ -13,7 +13,7 @@ # - SOPS encryption (see secrets.sops.example.yaml) # ============================================================================= -# --- countly chart (environments//secrets-countly.yaml) --- +# --- countly chart (environments//credentials-countly.yaml) --- secrets: mode: values common: @@ -25,7 +25,7 @@ secrets: mongodb: password: "CHANGEME-match-mongodb-chart" -# --- countly-mongodb chart (environments//secrets-mongodb.yaml) --- +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- secrets: mode: values users: @@ -37,14 +37,14 @@ users: metrics: password: "CHANGEME-metrics-exporter" -# --- countly-clickhouse chart (environments//secrets-clickhouse.yaml) --- +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- secrets: mode: values auth: defaultUserPassword: password: "CHANGEME-match-secrets.clickhouse.password" -# --- countly-kafka chart (environments//secrets-kafka.yaml) --- +# --- countly-kafka chart (environments//credentials-kafka.yaml) --- secrets: mode: values kafkaConnect: diff --git a/environments/v2-argo/secrets.sops.example.yaml 
b/environments/v2-argo/secrets.sops.example.yaml index 9b652d1..7335b8d 100644 --- a/environments/v2-argo/secrets.sops.example.yaml +++ b/environments/v2-argo/secrets.sops.example.yaml @@ -2,11 +2,11 @@ # SOPS Encrypted Secrets Example # ============================================================================= # Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//secrets-countly.yaml +# sops --encrypt --in-place environments//credentials-countly.yaml # # Configure helmfile to decrypt with the helm-secrets plugin: # values: -# - secrets://environments//secrets-countly.yaml +# - secrets://environments//credentials-countly.yaml # # See: https://github.com/jkroepke/helm-secrets # ============================================================================= diff --git a/helmfile.yaml.gotmpl b/helmfile.yaml.gotmpl index 3759daf..90b78fe 100644 --- a/helmfile.yaml.gotmpl +++ b/helmfile.yaml.gotmpl @@ -36,7 +36,7 @@ releases: - profiles/sizing/{{ .Values | get "global.sizing" "small" }}/mongodb.yaml - profiles/security/{{ .Values | get "global.security" "open" }}/mongodb.yaml - environments/{{ .Environment.Name }}/mongodb.yaml - - environments/{{ .Environment.Name }}/secrets-mongodb.yaml + - environments/{{ .Environment.Name }}/credentials-mongodb.yaml - name: countly-clickhouse installed: {{ ne (.Values | get "backingServices.clickhouse.mode" "bundled") "external" }} @@ -47,7 +47,7 @@ releases: - profiles/sizing/{{ .Values | get "global.sizing" "small" }}/clickhouse.yaml - profiles/security/{{ .Values | get "global.security" "open" }}/clickhouse.yaml - environments/{{ .Environment.Name }}/clickhouse.yaml - - environments/{{ .Environment.Name }}/secrets-clickhouse.yaml + - environments/{{ .Environment.Name }}/credentials-clickhouse.yaml - name: countly-kafka installed: {{ ne (.Values | get "backingServices.kafka.mode" "bundled") "external" }} @@ -60,7 +60,7 @@ releases: - profiles/observability/{{ .Values | get 
"global.observability" "full" }}/kafka.yaml - profiles/security/{{ .Values | get "global.security" "open" }}/kafka.yaml - environments/{{ .Environment.Name }}/kafka.yaml - - environments/{{ .Environment.Name }}/secrets-kafka.yaml + - environments/{{ .Environment.Name }}/credentials-kafka.yaml needs: - mongodb/countly-mongodb - clickhouse/countly-clickhouse @@ -75,7 +75,7 @@ releases: - profiles/observability/{{ .Values | get "global.observability" "full" }}/countly.yaml - profiles/security/{{ .Values | get "global.security" "open" }}/countly.yaml - environments/{{ .Environment.Name }}/countly.yaml - - environments/{{ .Environment.Name }}/secrets-countly.yaml + - environments/{{ .Environment.Name }}/credentials-countly.yaml needs: - mongodb/countly-mongodb - clickhouse/countly-clickhouse @@ -92,7 +92,7 @@ releases: - profiles/observability/{{ .Values | get "global.observability" "full" }}/observability.yaml - profiles/security/{{ .Values | get "global.security" "open" }}/observability.yaml - environments/{{ .Environment.Name }}/observability.yaml - - environments/{{ .Environment.Name }}/secrets-observability.yaml + - environments/{{ .Environment.Name }}/credentials-observability.yaml needs: - countly/countly @@ -105,7 +105,7 @@ releases: values: - environments/{{ .Environment.Name }}/global.yaml - environments/{{ .Environment.Name }}/migration.yaml - - environments/{{ .Environment.Name }}/secrets-migration.yaml + - environments/{{ .Environment.Name }}/credentials-migration.yaml needs: - mongodb/countly-mongodb - clickhouse/countly-clickhouse diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index 219d668..fe5ba74 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -147,7 +147,7 @@ Created: ${customer_file} Next: - 1. Fill in environments/${customer}/secrets-*.yaml + 1. Fill in environments/${customer}/credentials-*.yaml 2. Set argocd/customers/${customer}.yaml GCP and cluster metadata for External Secrets 3. 
Review environments/${customer}/*.yaml for customer-specific overrides 4. Create Secret Manager secrets using the ${customer}-- convention From 511694aea2ae01ce7358e9e0f6bdaf3adaa8975c Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 18:17:35 +0530 Subject: [PATCH 55/79] Keep Kafka Connect image public --- charts/countly-kafka/templates/_helpers.tpl | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/charts/countly-kafka/templates/_helpers.tpl b/charts/countly-kafka/templates/_helpers.tpl index 2bd3fc1..936afa5 100644 --- a/charts/countly-kafka/templates/_helpers.tpl +++ b/charts/countly-kafka/templates/_helpers.tpl @@ -99,17 +99,15 @@ ClickHouse Connect secret name {{- end -}} {{/* -Resolve the Kafka Connect image based on the selected source mode. +Resolve the Kafka Connect image. + +Kafka Connect now uses the public Countly image by default. We intentionally +do not rewrite it through global.imageSource.mode because Countly app images +and Kafka Connect images can follow different distribution paths. */}} {{- define "countly-kafka.connectImage" -}} -{{- $mode := .Values.global.imageSource.mode | default "direct" -}} -{{- if eq $mode "gcpArtifactRegistry" -}} -{{- $prefix := required "global.imageSource.gcpArtifactRegistry.repositoryPrefix is required when global.imageSource.mode is gcpArtifactRegistry" .Values.global.imageSource.gcpArtifactRegistry.repositoryPrefix -}} -{{- printf "%s/%s" ($prefix | trimSuffix "/") .Values.kafkaConnect.artifactImage -}} -{{- else -}} {{- .Values.kafkaConnect.image -}} {{- end -}} -{{- end -}} {{/* Resolve the first configured imagePullSecret name. 
From fb37800d41ebe2717f26608bc239b174d01ad34b Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 18:58:50 +0530 Subject: [PATCH 56/79] Clean fresh customer path and drop v2-argo --- README.md | 6 +- argocd/ONBOARDING.md | 2 + argocd/customers/v2-argo.yaml | 16 ---- charts/countly-kafka/templates/_helpers.tpl | 10 --- .../templates/external-secret-image-pull.yaml | 26 ------ .../countly-kafka/templates/kafkaconnect.yaml | 4 - charts/countly-kafka/values.schema.json | 43 +--------- charts/countly-kafka/values.yaml | 13 --- docs/DEPLOYING.md | 2 +- docs/QUICKSTART.md | 2 +- environments/local/kafka.yaml | 1 - environments/reference/kafka.yaml | 1 - environments/v2-argo/README.md | 77 ----------------- environments/v2-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/v2-argo/countly-tls.env | 7 -- environments/v2-argo/countly.yaml | 2 - .../v2-argo/credentials-clickhouse.yaml | 10 --- environments/v2-argo/credentials-countly.yaml | 22 ----- environments/v2-argo/credentials-kafka.yaml | 11 --- .../v2-argo/credentials-migration.yaml | 2 - environments/v2-argo/credentials-mongodb.yaml | 21 ----- .../v2-argo/credentials-observability.yaml | 2 - .../v2-argo/external-secrets.example.yaml | 84 ------------------- environments/v2-argo/global.yaml | 39 --------- .../v2-argo/image-pull-secrets.example.yaml | 41 --------- environments/v2-argo/kafka.yaml | 2 - environments/v2-argo/migration.yaml | 1 - environments/v2-argo/mongodb.yaml | 2 - environments/v2-argo/observability.yaml | 1 - environments/v2-argo/secrets.example.yaml | 62 -------------- .../v2-argo/secrets.sops.example.yaml | 21 ----- 32 files changed, 10 insertions(+), 556 deletions(-) delete mode 100644 argocd/customers/v2-argo.yaml delete mode 100644 charts/countly-kafka/templates/external-secret-image-pull.yaml delete mode 100644 environments/v2-argo/README.md delete mode 100644 environments/v2-argo/clickhouse.yaml delete mode 100644 
environments/v2-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/v2-argo/countly-tls.env delete mode 100644 environments/v2-argo/countly.yaml delete mode 100644 environments/v2-argo/credentials-clickhouse.yaml delete mode 100644 environments/v2-argo/credentials-countly.yaml delete mode 100644 environments/v2-argo/credentials-kafka.yaml delete mode 100644 environments/v2-argo/credentials-migration.yaml delete mode 100644 environments/v2-argo/credentials-mongodb.yaml delete mode 100644 environments/v2-argo/credentials-observability.yaml delete mode 100644 environments/v2-argo/external-secrets.example.yaml delete mode 100644 environments/v2-argo/global.yaml delete mode 100644 environments/v2-argo/image-pull-secrets.example.yaml delete mode 100644 environments/v2-argo/kafka.yaml delete mode 100644 environments/v2-argo/migration.yaml delete mode 100644 environments/v2-argo/mongodb.yaml delete mode 100644 environments/v2-argo/observability.yaml delete mode 100644 environments/v2-argo/secrets.example.yaml delete mode 100644 environments/v2-argo/secrets.sops.example.yaml diff --git a/README.md b/README.md index 03f10e7..6316684 100644 --- a/README.md +++ b/README.md @@ -177,7 +177,7 @@ Install required operators before deploying Countly. See [docs/PREREQUISITES.md] For a GAR-backed production example, see [environments/example-production/global.yaml](/Users/admin/cly/helm/environments/example-production/global.yaml) and replace `countly-gar` with your Kubernetes docker-registry secret name. For GitOps-managed pull secrets, start from [environments/reference/image-pull-secrets.example.yaml](/Users/admin/cly/helm/environments/reference/image-pull-secrets.example.yaml) and encrypt or template it before committing. -For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly and Kafka Connect each create their own namespaced `dockerconfigjson` pull secret. 
+For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly can create its namespaced `dockerconfigjson` pull secret. Application secrets can use the same pattern in `credentials-countly.yaml`, `credentials-kafka.yaml`, `credentials-clickhouse.yaml`, and `credentials-mongodb.yaml` by switching `secrets.mode` to `externalSecret` and filling `secrets.externalSecret.remoteRefs`. Recommended Secret Manager naming convention: @@ -217,7 +217,7 @@ This table shows which images are used by the platform, where they are pulled fr | Component | Image / Pattern | Source Registry | Ownership | Private/GAR Ready | |-------|-------|-------|-------|-------| | Countly app pods (`api`, `frontend`, `ingestor`, `aggregator`, `jobserver`) | `gcr.io/countly-dev-313620/countly-unified:26.01` or `/countly-unified` | `gcr.io` or `us-docker.pkg.dev` | Countly-provided | Yes | -| Kafka Connect ClickHouse | `countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0` or `/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0` | Docker Hub or `us-docker.pkg.dev` | Countly-provided custom image | Yes | +| Kafka Connect ClickHouse | `countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0` | Docker Hub | Countly-provided custom image | Public by default | | ClickHouse server | `clickhouse/clickhouse-server:26.3` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | | ClickHouse keeper | `clickhouse/clickhouse-keeper:26.3` | Docker Hub style namespace | Official provider image | No, not via current GAR toggle | | MongoDB database | chosen by MongoDB Kubernetes Operator from `version: 8.2.5` | operator-resolved upstream image | Official provider image | No, not via current chart values | @@ -315,7 +315,7 @@ Composable profile dimensions — select one value per dimension in `global.yaml Environments contain 
deployment-specific choices: - `global.yaml` — Profile selectors, hostname, backing service modes - `.yaml` — Per-chart overrides (tuning, network policy, OTEL) -- `secrets-.yaml` — Per-chart secrets (gitignored) +- `credentials-.yaml` — Per-chart credentials overrides ### Deployment Modes diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index 6f908ad..aeaa29c 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -317,6 +317,8 @@ global: key: -gar-dockerconfig ``` +This GAR pull-secret path is for Countly application images. Kafka Connect uses the public `countly/strimzi-kafka-connect-clickhouse` image by default. + ## Step 5: If Using Secret Manager, Prepare The Cluster This is the production path. diff --git a/argocd/customers/v2-argo.yaml b/argocd/customers/v2-argo.yaml deleted file mode 100644 index 5e5b1f2..0000000 --- a/argocd/customers/v2-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: v2-argo -environment: v2-argo -project: countly-customers -server: https://34.123.21.39 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: countly-dev-313620 -clusterName: v2-argo -clusterLocation: us-central1-a -hostname: v2-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/charts/countly-kafka/templates/_helpers.tpl b/charts/countly-kafka/templates/_helpers.tpl index 936afa5..941a190 100644 --- a/charts/countly-kafka/templates/_helpers.tpl +++ b/charts/countly-kafka/templates/_helpers.tpl @@ -108,13 +108,3 @@ and Kafka Connect images can follow different distribution paths. {{- define "countly-kafka.connectImage" -}} {{- .Values.kafkaConnect.image -}} {{- end -}} - -{{/* -Resolve the first configured imagePullSecret name. 
-*/}} -{{- define "countly-kafka.imagePullSecretName" -}} -{{- $pullSecrets := .Values.global.imagePullSecrets | default list -}} -{{- if gt (len $pullSecrets) 0 -}} -{{- (index $pullSecrets 0).name -}} -{{- end -}} -{{- end -}} diff --git a/charts/countly-kafka/templates/external-secret-image-pull.yaml b/charts/countly-kafka/templates/external-secret-image-pull.yaml deleted file mode 100644 index 7354412..0000000 --- a/charts/countly-kafka/templates/external-secret-image-pull.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if .Values.global.imagePullSecretExternalSecret.enabled }} -apiVersion: external-secrets.io/v1 -kind: ExternalSecret -metadata: - name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly-kafka.imagePullSecretName" .) }} - labels: - {{- include "countly-kafka.labels" . | nindent 4 }} - {{- if .Values.argocd.enabled }} - annotations: - {{- include "countly-kafka.syncWave" (dict "wave" "0" "root" .) | nindent 4 }} - {{- end }} -spec: - refreshInterval: {{ .Values.global.imagePullSecretExternalSecret.refreshInterval | default "1h" }} - secretStoreRef: - name: {{ required "global.imagePullSecretExternalSecret.secretStoreRef.name is required when global.imagePullSecretExternalSecret.enabled is true" .Values.global.imagePullSecretExternalSecret.secretStoreRef.name }} - kind: {{ .Values.global.imagePullSecretExternalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} - target: - name: {{ required "global.imagePullSecrets[0].name is required when global.imagePullSecretExternalSecret.enabled is true" (include "countly-kafka.imagePullSecretName" .) 
}} - creationPolicy: Owner - template: - type: kubernetes.io/dockerconfigjson - data: - - secretKey: .dockerconfigjson - remoteRef: - key: {{ required "global.imagePullSecretExternalSecret.remoteRef.key is required when global.imagePullSecretExternalSecret.enabled is true" .Values.global.imagePullSecretExternalSecret.remoteRef.key }} -{{- end }} diff --git a/charts/countly-kafka/templates/kafkaconnect.yaml b/charts/countly-kafka/templates/kafkaconnect.yaml index 0485288..401df80 100644 --- a/charts/countly-kafka/templates/kafkaconnect.yaml +++ b/charts/countly-kafka/templates/kafkaconnect.yaml @@ -36,10 +36,6 @@ spec: {{- end }} template: pod: - {{- with .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} {{- $scheduling := .Values.kafkaConnect.scheduling | default dict }} {{- $antiAffinity := $scheduling.antiAffinity | default dict }} {{- if (and (hasKey $antiAffinity "enabled") $antiAffinity.enabled) }} diff --git a/charts/countly-kafka/values.schema.json b/charts/countly-kafka/values.schema.json index 87f27e0..7f77465 100644 --- a/charts/countly-kafka/values.schema.json +++ b/charts/countly-kafka/values.schema.json @@ -8,41 +8,9 @@ "properties": { "imageRegistry": { "type": "string" }, "imagePullSecrets": { "type": "array" }, - "imagePullSecretExternalSecret": { - "type": "object", - "properties": { - "enabled": { "type": "boolean" }, - "refreshInterval": { "type": "string" }, - "secretStoreRef": { - "type": "object", - "properties": { - "name": { "type": "string" }, - "kind": { "type": "string" } - } - }, - "remoteRef": { - "type": "object", - "properties": { - "key": { "type": "string" } - } - } - } - }, - "imageSource": { - "type": "object", - "properties": { - "mode": { - "type": "string", - "enum": ["direct", "gcpArtifactRegistry"] - }, - "gcpArtifactRegistry": { - "type": "object", - "properties": { - "repositoryPrefix": { "type": "string" } - } - } - } - } + "storageClass": { "type": "string" }, + "sizing": { 
"type": "string" }, + "scheduling": { "type": "object" } } }, "createNamespace": { "type": "boolean" }, @@ -116,11 +84,6 @@ "enabled": { "type": "boolean" }, "name": { "type": "string" }, "image": { "type": "string", "minLength": 1 }, - "artifactImage": { - "type": "string", - "minLength": 1, - "description": "Image name appended to global.imageSource.gcpArtifactRegistry.repositoryPrefix when using GCP Artifact Registry" - }, "replicas": { "type": "integer", "minimum": 1 }, "bootstrapServers": { "type": "string" }, "resources": { "type": "object" }, diff --git a/charts/countly-kafka/values.yaml b/charts/countly-kafka/values.yaml index fe3e781..ac670e0 100644 --- a/charts/countly-kafka/values.yaml +++ b/charts/countly-kafka/values.yaml @@ -1,17 +1,5 @@ global: imageRegistry: "" - imageSource: - mode: direct - gcpArtifactRegistry: - repositoryPrefix: "" - imagePullSecretExternalSecret: - enabled: false - refreshInterval: "1h" - secretStoreRef: - name: "" - kind: ClusterSecretStore - remoteRef: - key: "" imagePullSecrets: [] storageClass: "" sizing: small @@ -116,7 +104,6 @@ kafkaConnect: enabled: true name: connect-ch image: "countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" - artifactImage: "strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" replicas: 2 bootstrapServers: "" resources: diff --git a/docs/DEPLOYING.md b/docs/DEPLOYING.md index 9c5594e..9bd328b 100644 --- a/docs/DEPLOYING.md +++ b/docs/DEPLOYING.md @@ -40,7 +40,7 @@ See [DEPLOYMENT-MODES.md](DEPLOYMENT-MODES.md) for all mode options. Your copied environment already includes the chart-specific secret files from `environments/reference/`. -Fill in the required passwords in the per-chart secret files (`secrets-.yaml`). Every chart needs credentials on first install: +Fill in the required passwords in the per-chart credential files (`credentials-.yaml`). 
Every chart needs credentials on first install: | Secret File | Required Secrets | |-------------|-----------------| diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md index 9cf3c3c..2a8dedf 100644 --- a/docs/QUICKSTART.md +++ b/docs/QUICKSTART.md @@ -57,7 +57,7 @@ environments/local/global.yaml # Global settings (profile sel profiles/sizing/local/.yaml # Sizing (resources, replicas, HA) profiles///.yaml # Optional dimension profiles environments/local/.yaml # Environment choices (ingress, OTEL, etc.) -environments/local/secrets-.yaml # Credentials (gitignored) +environments/local/credentials-.yaml # Credentials overrides ``` Important: diff --git a/environments/local/kafka.yaml b/environments/local/kafka.yaml index 34459c4..ed80d66 100644 --- a/environments/local/kafka.yaml +++ b/environments/local/kafka.yaml @@ -5,7 +5,6 @@ # Use OTel-enabled image (includes /opt/otel/opentelemetry-javaagent.jar) kafkaConnect: image: "countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" - artifactImage: "strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" otel: enabled: true resourceAttributes: "service.namespace=countly,deployment.environment=local" diff --git a/environments/reference/kafka.yaml b/environments/reference/kafka.yaml index 2f5911c..6100f36 100644 --- a/environments/reference/kafka.yaml +++ b/environments/reference/kafka.yaml @@ -139,7 +139,6 @@ kafkaConnect: enabled: true name: connect-ch image: "countly/strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" - artifactImage: "strimzi-kafka-connect-clickhouse:kafka4.2.0-ch1.3.5-strimzi0.51-otel2.12.0" replicas: 2 bootstrapServers: "" # Auto-derived from cluster if empty diff --git a/environments/v2-argo/README.md b/environments/v2-argo/README.md deleted file mode 100644 index 65a5fb3..0000000 --- a/environments/v2-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new 
Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
-- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/v2-argo/clickhouse.yaml b/environments/v2-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/v2-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/v2-argo/cluster-secret-store.gcp.example.yaml b/environments/v2-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/v2-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. 
-# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/v2-argo/countly-tls.env b/environments/v2-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/v2-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/v2-argo/countly.yaml b/environments/v2-argo/countly.yaml deleted file mode 100644 index b71d75e..0000000 --- a/environments/v2-argo/countly.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
diff --git a/environments/v2-argo/credentials-clickhouse.yaml b/environments/v2-argo/credentials-clickhouse.yaml deleted file mode 100644 index 92f9f5e..0000000 --- a/environments/v2-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: v2-argo-clickhouse-default-user-password diff --git a/environments/v2-argo/credentials-countly.yaml b/environments/v2-argo/credentials-countly.yaml deleted file mode 100644 index 3dc43c5..0000000 --- a/environments/v2-argo/credentials-countly.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Countly secrets: static values in Git, passwords from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: v2-argo-countly-encryption-reports-key - webSessionSecret: v2-argo-countly-web-session-secret - passwordSecret: v2-argo-countly-password-secret - clickhouse: - password: v2-argo-countly-clickhouse-password - mongodb: - password: v2-argo-countly-mongodb-password diff --git a/environments/v2-argo/credentials-kafka.yaml b/environments/v2-argo/credentials-kafka.yaml deleted file mode 100644 index 737bb5a..0000000 --- a/environments/v2-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# Kafka Connect secrets from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: v2-argo-kafka-connect-clickhouse-password diff --git 
a/environments/v2-argo/credentials-migration.yaml b/environments/v2-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/v2-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. diff --git a/environments/v2-argo/credentials-mongodb.yaml b/environments/v2-argo/credentials-mongodb.yaml deleted file mode 100644 index 10daeba..0000000 --- a/environments/v2-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: v2-argo-mongodb-admin-password - app: - password: v2-argo-mongodb-app-password - metrics: - password: v2-argo-mongodb-metrics-password - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/v2-argo/credentials-observability.yaml b/environments/v2-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/v2-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/v2-argo/external-secrets.example.yaml b/environments/v2-argo/external-secrets.example.yaml deleted file mode 100644 index 35e59ad..0000000 --- a/environments/v2-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under 
environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# webSessionSecret: "acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-countly-mongodb-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. 
Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. diff --git a/environments/v2-argo/global.yaml b/environments/v2-argo/global.yaml deleted file mode 100644 index bcdf9ff..0000000 --- a/environments/v2-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRef: - key: customers-gcr-argo-gar-dockerconfig - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: v2-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/v2-argo/image-pull-secrets.example.yaml b/environments/v2-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/v2-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). 
-# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/v2-argo/kafka.yaml b/environments/v2-argo/kafka.yaml deleted file mode 100644 index ff6fe5e..0000000 --- a/environments/v2-argo/kafka.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/v2-argo/migration.yaml b/environments/v2-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/v2-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. diff --git a/environments/v2-argo/mongodb.yaml b/environments/v2-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/v2-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/v2-argo/observability.yaml b/environments/v2-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/v2-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/v2-argo/secrets.example.yaml b/environments/v2-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/v2-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). -# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: values -auth: - 
defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/v2-argo/secrets.sops.example.yaml b/environments/v2-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/v2-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
-# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From b60f6652ad1f8e95db1a6de95c1b17e8f2745917 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 19:44:07 +0530 Subject: [PATCH 57/79] Remove gcr-argo customer --- argocd/customers/gcr-argo.yaml | 16 ---- environments/gcr-argo/README.md | 77 ----------------- environments/gcr-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/gcr-argo/countly-tls.env | 7 -- environments/gcr-argo/countly.yaml | 21 ----- .../gcr-argo/credentials-clickhouse.yaml | 10 --- .../gcr-argo/credentials-countly.yaml | 22 ----- environments/gcr-argo/credentials-kafka.yaml | 12 --- .../gcr-argo/credentials-migration.yaml | 2 - .../gcr-argo/credentials-mongodb.yaml | 21 ----- .../gcr-argo/credentials-observability.yaml | 2 - .../gcr-argo/external-secrets.example.yaml | 84 ------------------- environments/gcr-argo/global.yaml | 39 --------- .../gcr-argo/image-pull-secrets.example.yaml | 41 --------- environments/gcr-argo/kafka.yaml | 9 -- environments/gcr-argo/migration.yaml | 2 - environments/gcr-argo/mongodb.yaml | 2 - environments/gcr-argo/observability.yaml | 2 - environments/gcr-argo/secrets.example.yaml | 42 ---------- .../gcr-argo/secrets.sops.example.yaml | 21 ----- 21 files changed, 465 deletions(-) delete mode 100644 argocd/customers/gcr-argo.yaml delete mode 100644 environments/gcr-argo/README.md delete mode 100644 environments/gcr-argo/clickhouse.yaml delete mode 100644 environments/gcr-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/gcr-argo/countly-tls.env delete mode 100644 environments/gcr-argo/countly.yaml delete mode 100644 environments/gcr-argo/credentials-clickhouse.yaml delete mode 100644 environments/gcr-argo/credentials-countly.yaml delete mode 100644 environments/gcr-argo/credentials-kafka.yaml delete mode 100644 
environments/gcr-argo/credentials-migration.yaml delete mode 100644 environments/gcr-argo/credentials-mongodb.yaml delete mode 100644 environments/gcr-argo/credentials-observability.yaml delete mode 100644 environments/gcr-argo/external-secrets.example.yaml delete mode 100644 environments/gcr-argo/global.yaml delete mode 100644 environments/gcr-argo/image-pull-secrets.example.yaml delete mode 100644 environments/gcr-argo/kafka.yaml delete mode 100644 environments/gcr-argo/migration.yaml delete mode 100644 environments/gcr-argo/mongodb.yaml delete mode 100644 environments/gcr-argo/observability.yaml delete mode 100644 environments/gcr-argo/secrets.example.yaml delete mode 100644 environments/gcr-argo/secrets.sops.example.yaml diff --git a/argocd/customers/gcr-argo.yaml b/argocd/customers/gcr-argo.yaml deleted file mode 100644 index c662d03..0000000 --- a/argocd/customers/gcr-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: gcr-argo -environment: gcr-argo -project: countly-customers -server: https://34.60.231.37 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: countly-dev-313620 -clusterName: gcr-argo -clusterLocation: us-central1-a -hostname: gcr-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/environments/gcr-argo/README.md b/environments/gcr-argo/README.md deleted file mode 100644 index d764f60..0000000 --- a/environments/gcr-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. 
Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. Fill in required secrets in the chart-specific files: - - `countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `clickhouse.yaml` → `auth.defaultUserPassword.password` - - `kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) -- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator (see `external-secrets.example.yaml`) -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. 
-Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/gcr-argo/clickhouse.yaml b/environments/gcr-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/gcr-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. 
-# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. 
-# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/gcr-argo/countly-tls.env b/environments/gcr-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/gcr-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/gcr-argo/countly.yaml b/environments/gcr-argo/countly.yaml deleted file mode 100644 index 53222d7..0000000 --- a/environments/gcr-argo/countly.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
- -global: - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRef: - key: customers-gcr-argo-gar-dockerconfig - imagePullSecrets: - - name: countly-registry - -image: - digest: "sha256:b42efb9713ee11d173fe409924fb9e2a208b5c0beafed9e42f349b996b6650a4" diff --git a/environments/gcr-argo/credentials-clickhouse.yaml b/environments/gcr-argo/credentials-clickhouse.yaml deleted file mode 100644 index d67ff5d..0000000 --- a/environments/gcr-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: customers-gcr-argo-clickhouse-default-user-password diff --git a/environments/gcr-argo/credentials-countly.yaml b/environments/gcr-argo/credentials-countly.yaml deleted file mode 100644 index 6f7311e..0000000 --- a/environments/gcr-argo/credentials-countly.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Countly secrets: static values in Git, passwords from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: customers-gcr-argo-countly-encryption-reports-key - webSessionSecret: customers-gcr-argo-countly-web-session-secret - passwordSecret: customers-gcr-argo-countly-password-secret - clickhouse: - password: customers-gcr-argo-countly-clickhouse-password - mongodb: - password: customers-gcr-argo-countly-mongodb-password diff 
--git a/environments/gcr-argo/credentials-kafka.yaml b/environments/gcr-argo/credentials-kafka.yaml deleted file mode 100644 index acc0e8c..0000000 --- a/environments/gcr-argo/credentials-kafka.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Kafka Connect secrets from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - # keep non-sensitive static values here; fetch only passwords remotely - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: customers-gcr-argo-kafka-connect-clickhouse-password diff --git a/environments/gcr-argo/credentials-migration.yaml b/environments/gcr-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/gcr-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. diff --git a/environments/gcr-argo/credentials-mongodb.yaml b/environments/gcr-argo/credentials-mongodb.yaml deleted file mode 100644 index f4958f6..0000000 --- a/environments/gcr-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets from Google Secret Manager via External Secrets Operator -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: customers-gcr-argo-mongodb-admin-password - app: - password: customers-gcr-argo-mongodb-app-password - metrics: - password: customers-gcr-argo-mongodb-metrics-password - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/gcr-argo/credentials-observability.yaml b/environments/gcr-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/gcr-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled 
mode -# Add external backend credentials here if using observability: external diff --git a/environments/gcr-argo/external-secrets.example.yaml b/environments/gcr-argo/external-secrets.example.yaml deleted file mode 100644 index f7beae0..0000000 --- a/environments/gcr-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific files under environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# webSessionSecret: "acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-countly-mongodb-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# 
app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. diff --git a/environments/gcr-argo/global.yaml b/environments/gcr-argo/global.yaml deleted file mode 100644 index 333ac86..0000000 --- a/environments/gcr-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: us-docker.pkg.dev/countly-01/countly-unified - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRef: - key: customers-gcr-argo-gar-dockerconfig - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: gcr-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/gcr-argo/image-pull-secrets.example.yaml 
b/environments/gcr-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/gcr-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). -# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml deleted file mode 100644 index 860b540..0000000 --- a/environments/gcr-argo/kafka.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. 
- -global: - imageSource: - mode: direct - imagePullSecretExternalSecret: - enabled: false - imagePullSecrets: [] diff --git a/environments/gcr-argo/migration.yaml b/environments/gcr-argo/migration.yaml deleted file mode 100644 index 7cb4b34..0000000 --- a/environments/gcr-argo/migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific migration overrides only. -# This deployment uses the disabled migration profile. diff --git a/environments/gcr-argo/mongodb.yaml b/environments/gcr-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/gcr-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/gcr-argo/observability.yaml b/environments/gcr-argo/observability.yaml deleted file mode 100644 index 56e78bb..0000000 --- a/environments/gcr-argo/observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific observability overrides only. -# This deployment uses the disabled observability profile. diff --git a/environments/gcr-argo/secrets.example.yaml b/environments/gcr-argo/secrets.example.yaml deleted file mode 100644 index a03c1fc..0000000 --- a/environments/gcr-argo/secrets.example.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). 
-# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -users: - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" diff --git a/environments/gcr-argo/secrets.sops.example.yaml b/environments/gcr-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/gcr-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would 
contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 774e23eabe93a39c92993a28e4791c8612d00680 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 20:31:21 +0530 Subject: [PATCH 58/79] Add v2-argo customer --- argocd/customers/v2-argo.yaml | 16 ++++ environments/v2-argo/README.md | 77 +++++++++++++++++ environments/v2-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 +++++++ environments/v2-argo/countly-tls.env | 7 ++ environments/v2-argo/countly.yaml | 2 + .../v2-argo/credentials-clickhouse.yaml | 10 +++ environments/v2-argo/credentials-countly.yaml | 22 +++++ environments/v2-argo/credentials-kafka.yaml | 11 +++ .../v2-argo/credentials-migration.yaml | 2 + environments/v2-argo/credentials-mongodb.yaml | 21 +++++ .../v2-argo/credentials-observability.yaml | 2 + .../v2-argo/external-secrets.example.yaml | 84 +++++++++++++++++++ environments/v2-argo/global.yaml | 39 +++++++++ .../v2-argo/image-pull-secrets.example.yaml | 41 +++++++++ environments/v2-argo/kafka.yaml | 2 + environments/v2-argo/migration.yaml | 1 + environments/v2-argo/mongodb.yaml | 2 + environments/v2-argo/observability.yaml | 1 + environments/v2-argo/secrets.example.yaml | 62 ++++++++++++++ .../v2-argo/secrets.sops.example.yaml | 21 +++++ 21 files changed, 456 insertions(+) create mode 100644 argocd/customers/v2-argo.yaml create mode 100644 environments/v2-argo/README.md create mode 100644 environments/v2-argo/clickhouse.yaml create mode 100644 environments/v2-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/v2-argo/countly-tls.env create mode 100644 environments/v2-argo/countly.yaml create mode 100644 environments/v2-argo/credentials-clickhouse.yaml create mode 100644 
environments/v2-argo/credentials-countly.yaml create mode 100644 environments/v2-argo/credentials-kafka.yaml create mode 100644 environments/v2-argo/credentials-migration.yaml create mode 100644 environments/v2-argo/credentials-mongodb.yaml create mode 100644 environments/v2-argo/credentials-observability.yaml create mode 100644 environments/v2-argo/external-secrets.example.yaml create mode 100644 environments/v2-argo/global.yaml create mode 100644 environments/v2-argo/image-pull-secrets.example.yaml create mode 100644 environments/v2-argo/kafka.yaml create mode 100644 environments/v2-argo/migration.yaml create mode 100644 environments/v2-argo/mongodb.yaml create mode 100644 environments/v2-argo/observability.yaml create mode 100644 environments/v2-argo/secrets.example.yaml create mode 100644 environments/v2-argo/secrets.sops.example.yaml diff --git a/argocd/customers/v2-argo.yaml b/argocd/customers/v2-argo.yaml new file mode 100644 index 0000000..5e5b1f2 --- /dev/null +++ b/argocd/customers/v2-argo.yaml @@ -0,0 +1,16 @@ +customer: v2-argo +environment: v2-argo +project: countly-customers +server: https://34.123.21.39 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: v2-argo +clusterLocation: us-central1-a +hostname: v2-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/v2-argo/README.md b/environments/v2-argo/README.md new file mode 100644 index 0000000..65a5fb3 --- /dev/null +++ b/environments/v2-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. Fill in required secrets in the chart-specific files: + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/v2-argo/clickhouse.yaml b/environments/v2-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/v2-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/v2-argo/cluster-secret-store.gcp.example.yaml b/environments/v2-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/v2-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. 
+# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/v2-argo/countly-tls.env b/environments/v2-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/v2-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline at end of file diff --git a/environments/v2-argo/countly.yaml b/environments/v2-argo/countly.yaml new file mode 100644 index 0000000..b71d75e --- /dev/null +++ b/environments/v2-argo/countly.yaml @@ -0,0 +1,2 @@ +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
diff --git a/environments/v2-argo/credentials-clickhouse.yaml b/environments/v2-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..84ee95e --- /dev/null +++ b/environments/v2-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: v2-argo-clickhouse-default-user-password + diff --git a/environments/v2-argo/credentials-countly.yaml b/environments/v2-argo/credentials-countly.yaml new file mode 100644 index 0000000..d01f504 --- /dev/null +++ b/environments/v2-argo/credentials-countly.yaml @@ -0,0 +1,22 @@ +secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: v2-argo-countly-encryption-reports-key + webSessionSecret: v2-argo-countly-web-session-secret + passwordSecret: v2-argo-countly-password-secret + clickhouse: + password: v2-argo-countly-clickhouse-password + mongodb: + password: v2-argo-countly-mongodb-password + diff --git a/environments/v2-argo/credentials-kafka.yaml b/environments/v2-argo/credentials-kafka.yaml new file mode 100644 index 0000000..533026f --- /dev/null +++ b/environments/v2-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: v2-argo-kafka-connect-clickhouse-password + diff --git a/environments/v2-argo/credentials-migration.yaml b/environments/v2-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/v2-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. 
+# Fill when `migration: enabled` is used for a customer. diff --git a/environments/v2-argo/credentials-mongodb.yaml b/environments/v2-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..0c1622e --- /dev/null +++ b/environments/v2-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: v2-argo-mongodb-admin-password + app: + password: v2-argo-mongodb-app-password + metrics: + password: v2-argo-mongodb-metrics-password + +users: + admin: + enabled: true + metrics: + enabled: true + diff --git a/environments/v2-argo/credentials-observability.yaml b/environments/v2-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/v2-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/v2-argo/external-secrets.example.yaml b/environments/v2-argo/external-secrets.example.yaml new file mode 100644 index 0000000..35e59ad --- /dev/null +++ b/environments/v2-argo/external-secrets.example.yaml @@ -0,0 +1,84 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific secrets files under environments//: +# +# environments//credentials-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# 
passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-countly-mongodb-password" +# +# environments//credentials-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//credentials-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//credentials-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. 
diff --git a/environments/v2-argo/global.yaml b/environments/v2-argo/global.yaml new file mode 100644 index 0000000..85831d8 --- /dev/null +++ b/environments/v2-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + imageSource: + mode: direct + gcpArtifactRegistry: + repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: "gcp-secrets" + kind: ClusterSecretStore + remoteRef: + key: "customers-gcr-argo-gar-dockerconfig" + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: v2-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/v2-argo/image-pull-secrets.example.yaml b/environments/v2-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/v2-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). +# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. 
+# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/v2-argo/kafka.yaml b/environments/v2-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/v2-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/v2-argo/migration.yaml b/environments/v2-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/v2-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/v2-argo/mongodb.yaml b/environments/v2-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/v2-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/v2-argo/observability.yaml b/environments/v2-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/v2-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. 
diff --git a/environments/v2-argo/secrets.example.yaml b/environments/v2-argo/secrets.example.yaml new file mode 100644 index 0000000..181cb54 --- /dev/null +++ b/environments/v2-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). +# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//credentials-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- +secrets: + mode: values +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//credentials-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External 
Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/v2-argo/secrets.sops.example.yaml b/environments/v2-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..7335b8d --- /dev/null +++ b/environments/v2-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//credentials-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//credentials-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
+# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From c8ecfa45a15144141f668e44d7c6af23757aff5b Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 20:59:33 +0530 Subject: [PATCH 59/79] Remove v2-argo test customer --- argocd/customers/v2-argo.yaml | 16 ---- environments/v2-argo/README.md | 77 ----------------- environments/v2-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/v2-argo/countly-tls.env | 7 -- environments/v2-argo/countly.yaml | 2 - .../v2-argo/credentials-clickhouse.yaml | 10 --- environments/v2-argo/credentials-countly.yaml | 22 ----- environments/v2-argo/credentials-kafka.yaml | 11 --- .../v2-argo/credentials-migration.yaml | 2 - environments/v2-argo/credentials-mongodb.yaml | 21 ----- .../v2-argo/credentials-observability.yaml | 2 - .../v2-argo/external-secrets.example.yaml | 84 ------------------- environments/v2-argo/global.yaml | 39 --------- .../v2-argo/image-pull-secrets.example.yaml | 41 --------- environments/v2-argo/kafka.yaml | 2 - environments/v2-argo/migration.yaml | 1 - environments/v2-argo/mongodb.yaml | 2 - environments/v2-argo/observability.yaml | 1 - environments/v2-argo/secrets.example.yaml | 62 -------------- .../v2-argo/secrets.sops.example.yaml | 21 ----- 21 files changed, 456 deletions(-) delete mode 100644 argocd/customers/v2-argo.yaml delete mode 100644 environments/v2-argo/README.md delete mode 100644 environments/v2-argo/clickhouse.yaml delete mode 100644 environments/v2-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/v2-argo/countly-tls.env delete mode 100644 environments/v2-argo/countly.yaml delete mode 100644 environments/v2-argo/credentials-clickhouse.yaml delete mode 100644 environments/v2-argo/credentials-countly.yaml delete mode 100644 environments/v2-argo/credentials-kafka.yaml delete mode 100644 
environments/v2-argo/credentials-migration.yaml delete mode 100644 environments/v2-argo/credentials-mongodb.yaml delete mode 100644 environments/v2-argo/credentials-observability.yaml delete mode 100644 environments/v2-argo/external-secrets.example.yaml delete mode 100644 environments/v2-argo/global.yaml delete mode 100644 environments/v2-argo/image-pull-secrets.example.yaml delete mode 100644 environments/v2-argo/kafka.yaml delete mode 100644 environments/v2-argo/migration.yaml delete mode 100644 environments/v2-argo/mongodb.yaml delete mode 100644 environments/v2-argo/observability.yaml delete mode 100644 environments/v2-argo/secrets.example.yaml delete mode 100644 environments/v2-argo/secrets.sops.example.yaml diff --git a/argocd/customers/v2-argo.yaml b/argocd/customers/v2-argo.yaml deleted file mode 100644 index 5e5b1f2..0000000 --- a/argocd/customers/v2-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: v2-argo -environment: v2-argo -project: countly-customers -server: https://34.123.21.39 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: countly-dev-313620 -clusterName: v2-argo -clusterLocation: us-central1-a -hostname: v2-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/environments/v2-argo/README.md b/environments/v2-argo/README.md deleted file mode 100644 index 65a5fb3..0000000 --- a/environments/v2-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. 
Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
-- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/v2-argo/clickhouse.yaml b/environments/v2-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/v2-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/v2-argo/cluster-secret-store.gcp.example.yaml b/environments/v2-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/v2-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. 
-# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/v2-argo/countly-tls.env b/environments/v2-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/v2-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/v2-argo/countly.yaml b/environments/v2-argo/countly.yaml deleted file mode 100644 index b71d75e..0000000 --- a/environments/v2-argo/countly.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
diff --git a/environments/v2-argo/credentials-clickhouse.yaml b/environments/v2-argo/credentials-clickhouse.yaml deleted file mode 100644 index 84ee95e..0000000 --- a/environments/v2-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: v2-argo-clickhouse-default-user-password - diff --git a/environments/v2-argo/credentials-countly.yaml b/environments/v2-argo/credentials-countly.yaml deleted file mode 100644 index d01f504..0000000 --- a/environments/v2-argo/credentials-countly.yaml +++ /dev/null @@ -1,22 +0,0 @@ -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: v2-argo-countly-encryption-reports-key - webSessionSecret: v2-argo-countly-web-session-secret - passwordSecret: v2-argo-countly-password-secret - clickhouse: - password: v2-argo-countly-clickhouse-password - mongodb: - password: v2-argo-countly-mongodb-password - diff --git a/environments/v2-argo/credentials-kafka.yaml b/environments/v2-argo/credentials-kafka.yaml deleted file mode 100644 index 533026f..0000000 --- a/environments/v2-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: v2-argo-kafka-connect-clickhouse-password - diff --git a/environments/v2-argo/credentials-migration.yaml b/environments/v2-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/v2-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. 
-# Fill when `migration: enabled` is used for a customer. diff --git a/environments/v2-argo/credentials-mongodb.yaml b/environments/v2-argo/credentials-mongodb.yaml deleted file mode 100644 index 0c1622e..0000000 --- a/environments/v2-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: v2-argo-mongodb-admin-password - app: - password: v2-argo-mongodb-app-password - metrics: - password: v2-argo-mongodb-metrics-password - -users: - admin: - enabled: true - metrics: - enabled: true - diff --git a/environments/v2-argo/credentials-observability.yaml b/environments/v2-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/v2-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/v2-argo/external-secrets.example.yaml b/environments/v2-argo/external-secrets.example.yaml deleted file mode 100644 index 35e59ad..0000000 --- a/environments/v2-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# webSessionSecret: 
"acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-countly-mongodb-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. 
diff --git a/environments/v2-argo/global.yaml b/environments/v2-argo/global.yaml deleted file mode 100644 index 85831d8..0000000 --- a/environments/v2-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: direct - gcpArtifactRegistry: - repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: "gcp-secrets" - kind: ClusterSecretStore - remoteRef: - key: "customers-gcr-argo-gar-dockerconfig" - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: v2-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/v2-argo/image-pull-secrets.example.yaml b/environments/v2-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/v2-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). 
-# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/v2-argo/kafka.yaml b/environments/v2-argo/kafka.yaml deleted file mode 100644 index ff6fe5e..0000000 --- a/environments/v2-argo/kafka.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/v2-argo/migration.yaml b/environments/v2-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/v2-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. diff --git a/environments/v2-argo/mongodb.yaml b/environments/v2-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/v2-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/v2-argo/observability.yaml b/environments/v2-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/v2-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/v2-argo/secrets.example.yaml b/environments/v2-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/v2-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). -# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: values -auth: - 
defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/v2-argo/secrets.sops.example.yaml b/environments/v2-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/v2-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
-# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From d09b093da834cb7d4468363bda3317ad69d540f9 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 21:13:50 +0530 Subject: [PATCH 60/79] Clarify cluster identity onboarding --- argocd/ONBOARDING.md | 191 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 180 insertions(+), 11 deletions(-) diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index aeaa29c..d04b875 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -333,7 +333,27 @@ After customer metadata is committed, `countly-bootstrap` will create them. ### 5.2 Enable Workload Identity On The Cluster -Check: +This is the part that usually feels confusing the first time. + +Simple version: +- Kubernetes pods inside the customer cluster need a safe way to prove who they are +- Google Secret Manager only gives secrets to identities it trusts +- Workload Identity is the bridge between those two things + +If this is not configured, External Secrets will not be able to read passwords from Google Secret Manager. 
+ +#### 5.2.1 Check Whether Workload Identity Is Already Enabled + +Who runs this: +- the person onboarding the customer cluster + +What this does: +- asks GKE whether the cluster already supports Workload Identity + +Why this matters: +- without this, the `external-secrets` pod cannot authenticate to Google Secret Manager + +Command: ```bash gcloud container clusters describe \ @@ -342,13 +362,61 @@ gcloud container clusters describe \ --format="value(workloadIdentityConfig.workloadPool)" ``` -Expected: +What good looks like: ```text .svc.id.goog ``` -Then check node pool metadata mode: +If the output is empty: +- Workload Identity is not enabled yet +- you must enable it before moving on + +#### 5.2.2 Turn Workload Identity On If It Is Missing + +Who runs this: +- the cluster administrator + +What this does: +- tells the cluster to trust Kubernetes service accounts as Google identities + +Why this matters: +- this is what allows the `external-secrets` pod to read from Google Secret Manager without using a static key file + +Command: + +```bash +gcloud container clusters update \ + --zone \ + --project \ + --workload-pool=.svc.id.goog +``` + +What good looks like: +- the command succeeds +- running the check again shows `.svc.id.goog` + +#### 5.2.3 Check The Node Pool Metadata Mode + +Who runs this: +- the cluster administrator + +What this does: +- checks whether the node pool is exposing the GKE metadata server in the correct way + +Why this matters: +- even if Workload Identity is enabled on the cluster, pods still need the node pool configured correctly to use it + +First list the node pools: + +```bash +gcloud container node-pools list \ + --cluster \ + --zone \ + --project +``` + +Then check each node pool: ```bash gcloud container node-pools describe \ @@ -358,25 +426,126 @@ gcloud container node-pools describe \ --format="value(config.workloadMetadataConfig.mode)" ``` -Expected: +What good looks like: ```text GKE_METADATA ``` -If these are wrong, External 
Secrets will fail. +If it is not `GKE_METADATA`, update it: + +```bash +gcloud container node-pools update \ + --cluster \ + --zone \ + --project \ + --workload-metadata=GKE_METADATA +``` + +#### 5.2.4 Quick Mental Model + +If you want a very simple way to remember this: + +- cluster Workload Identity: + - lets the cluster speak Google IAM +- node pool metadata mode: + - lets the pod actually use that identity on the node + +You need both. ### 5.3 Bind The Kubernetes Service Account To The GCP Service Account -The External Secrets service account in namespace `external-secrets` must be annotated with: +This is the second half of the setup. -```yaml -iam.gke.io/gcp-service-account: +Simple version: +- the `external-secrets` pod runs as a Kubernetes service account +- that Kubernetes service account must be linked to a Google service account +- that Google service account is the one allowed to read secrets + +#### 5.3.1 Understand The Two Identities + +There are two different identities here: + +1. Kubernetes service account: + - usually `external-secrets` in namespace `external-secrets` + - this is the identity used by the pod inside the cluster + +2. Google service account: + - something like `northstar-eso@example-secrets-project.iam.gserviceaccount.com` + - this is the identity Google Secret Manager trusts + +Workload Identity links those two together. 
+ +#### 5.3.2 Allow The Kubernetes Service Account To Act As The Google Service Account + +Who runs this: +- someone with IAM permission on the Google service account project + +What this does: +- tells Google IAM that the `external-secrets` Kubernetes service account is allowed to act as the chosen Google service account + +Why this matters: +- without this, the pod exists, but Google still does not trust it + +Command: + +```bash +gcloud iam service-accounts add-iam-policy-binding \ + \ + --project= \ + --role=roles/iam.workloadIdentityUser \ + --member="serviceAccount:.svc.id.goog[external-secrets/external-secrets]" ``` -The GCP service account also needs: -- `roles/iam.workloadIdentityUser` -- access to the Secret Manager secrets you want to read +What good looks like: +- the command succeeds +- the binding appears in the Google service account IAM policy + +#### 5.3.3 Allow The Google Service Account To Read Secrets + +Who runs this: +- someone with IAM permission on the Secret Manager project + +What this does: +- gives the Google service account permission to read secrets from the chosen Secret Manager project + +Why this matters: +- the identity link can be correct, but secret reads still fail if this permission is missing + +Command: + +```bash +gcloud projects add-iam-policy-binding \ + --member="serviceAccount:" \ + --role=roles/secretmanager.secretAccessor +``` + +What good looks like: +- the command succeeds +- the Google service account can read the expected secrets + +#### 5.3.4 Verify The Kubernetes Service Account Annotation + +Who runs this: +- the platform operator after Argo has installed the External Secrets Operator + +What this does: +- checks that the in-cluster Kubernetes service account is annotated with the Google service account email + +Why this matters: +- this annotation is how GKE knows which Google service account the pod should use + +Command: + +```bash +kubectl get sa -n external-secrets external-secrets -o yaml +``` + +What you 
should see: + +```yaml +iam.gke.io/gcp-service-account: +``` ### 5.4 Verify The ClusterSecretStore From 8e0438ebb5870317200da8820e4c3f4426103642 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 22:43:32 +0530 Subject: [PATCH 61/79] Add gcr-argo customer --- argocd/customers/gcr-argo.yaml | 16 ++++ environments/gcr-argo/README.md | 77 +++++++++++++++++ environments/gcr-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 +++++++ environments/gcr-argo/countly-tls.env | 7 ++ environments/gcr-argo/countly.yaml | 2 + .../gcr-argo/credentials-clickhouse.yaml | 10 +++ .../gcr-argo/credentials-countly.yaml | 23 +++++ environments/gcr-argo/credentials-kafka.yaml | 11 +++ .../gcr-argo/credentials-migration.yaml | 2 + .../gcr-argo/credentials-mongodb.yaml | 21 +++++ .../gcr-argo/credentials-observability.yaml | 2 + .../gcr-argo/external-secrets.example.yaml | 84 +++++++++++++++++++ environments/gcr-argo/global.yaml | 39 +++++++++ .../gcr-argo/image-pull-secrets.example.yaml | 41 +++++++++ environments/gcr-argo/kafka.yaml | 2 + environments/gcr-argo/migration.yaml | 1 + environments/gcr-argo/mongodb.yaml | 2 + environments/gcr-argo/observability.yaml | 1 + environments/gcr-argo/secrets.example.yaml | 62 ++++++++++++++ .../gcr-argo/secrets.sops.example.yaml | 21 +++++ 21 files changed, 457 insertions(+) create mode 100644 argocd/customers/gcr-argo.yaml create mode 100644 environments/gcr-argo/README.md create mode 100644 environments/gcr-argo/clickhouse.yaml create mode 100644 environments/gcr-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/gcr-argo/countly-tls.env create mode 100644 environments/gcr-argo/countly.yaml create mode 100644 environments/gcr-argo/credentials-clickhouse.yaml create mode 100644 environments/gcr-argo/credentials-countly.yaml create mode 100644 environments/gcr-argo/credentials-kafka.yaml create mode 100644 environments/gcr-argo/credentials-migration.yaml create mode 100644 
environments/gcr-argo/credentials-mongodb.yaml create mode 100644 environments/gcr-argo/credentials-observability.yaml create mode 100644 environments/gcr-argo/external-secrets.example.yaml create mode 100644 environments/gcr-argo/global.yaml create mode 100644 environments/gcr-argo/image-pull-secrets.example.yaml create mode 100644 environments/gcr-argo/kafka.yaml create mode 100644 environments/gcr-argo/migration.yaml create mode 100644 environments/gcr-argo/mongodb.yaml create mode 100644 environments/gcr-argo/observability.yaml create mode 100644 environments/gcr-argo/secrets.example.yaml create mode 100644 environments/gcr-argo/secrets.sops.example.yaml diff --git a/argocd/customers/gcr-argo.yaml b/argocd/customers/gcr-argo.yaml new file mode 100644 index 0000000..c31a647 --- /dev/null +++ b/argocd/customers/gcr-argo.yaml @@ -0,0 +1,16 @@ +customer: gcr-argo +environment: gcr-argo +project: countly-customers +server: https://34.123.21.39 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: gcr-argo +clusterLocation: us-central1-a +hostname: gcr-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/gcr-argo/README.md b/environments/gcr-argo/README.md new file mode 100644 index 0000000..65a5fb3 --- /dev/null +++ b/environments/gcr-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. Fill in required secrets in the chart-specific files: + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/gcr-argo/clickhouse.yaml b/environments/gcr-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/gcr-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. 
+# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/gcr-argo/countly-tls.env b/environments/gcr-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/gcr-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline at end of file diff --git a/environments/gcr-argo/countly.yaml b/environments/gcr-argo/countly.yaml new file mode 100644 index 0000000..b71d75e --- /dev/null +++ b/environments/gcr-argo/countly.yaml @@ -0,0 +1,2 @@ +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
diff --git a/environments/gcr-argo/credentials-clickhouse.yaml b/environments/gcr-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..eef6bee --- /dev/null +++ b/environments/gcr-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +# ClickHouse secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: gcr-argo-clickhouse-default-user-password diff --git a/environments/gcr-argo/credentials-countly.yaml b/environments/gcr-argo/credentials-countly.yaml new file mode 100644 index 0000000..1f9bb8f --- /dev/null +++ b/environments/gcr-argo/credentials-countly.yaml @@ -0,0 +1,23 @@ +# Countly secrets — FILL IN before first deploy +# Passwords must match across charts (see secrets.example.yaml) +secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: gcr-argo-countly-encryption-reports-key + webSessionSecret: gcr-argo-countly-web-session-secret + passwordSecret: gcr-argo-countly-password-secret + clickhouse: + password: gcr-argo-countly-clickhouse-password + mongodb: + password: gcr-argo-countly-mongodb-password diff --git a/environments/gcr-argo/credentials-kafka.yaml b/environments/gcr-argo/credentials-kafka.yaml new file mode 100644 index 0000000..da8bd9d --- /dev/null +++ b/environments/gcr-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +# Kafka secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: gcr-argo-kafka-connect-clickhouse-password diff --git a/environments/gcr-argo/credentials-migration.yaml 
b/environments/gcr-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/gcr-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. diff --git a/environments/gcr-argo/credentials-mongodb.yaml b/environments/gcr-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..45f6e79 --- /dev/null +++ b/environments/gcr-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +# MongoDB secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: gcr-argo-mongodb-admin-password + app: + password: gcr-argo-mongodb-app-password + metrics: + password: gcr-argo-mongodb-metrics-password + +users: + admin: + enabled: true + metrics: + enabled: true diff --git a/environments/gcr-argo/credentials-observability.yaml b/environments/gcr-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/gcr-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/gcr-argo/external-secrets.example.yaml b/environments/gcr-argo/external-secrets.example.yaml new file mode 100644 index 0000000..35e59ad --- /dev/null +++ b/environments/gcr-argo/external-secrets.example.yaml @@ -0,0 +1,84 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific secrets files under environments//: +# +# environments//credentials-countly.yaml +# secrets: +# mode: 
externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-countly-mongodb-password" +# +# environments//credentials-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//credentials-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//credentials-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. 
Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. diff --git a/environments/gcr-argo/global.yaml b/environments/gcr-argo/global.yaml new file mode 100644 index 0000000..ab59ecf --- /dev/null +++ b/environments/gcr-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: "gcp-secrets" + kind: ClusterSecretStore + remoteRef: + key: "gcr-argo-gar-dockerconfig" + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: gcr-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/gcr-argo/image-pull-secrets.example.yaml b/environments/gcr-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/gcr-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). 
+# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/gcr-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/gcr-argo/migration.yaml b/environments/gcr-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/gcr-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/gcr-argo/mongodb.yaml b/environments/gcr-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/gcr-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/gcr-argo/observability.yaml b/environments/gcr-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/gcr-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. diff --git a/environments/gcr-argo/secrets.example.yaml b/environments/gcr-argo/secrets.example.yaml new file mode 100644 index 0000000..181cb54 --- /dev/null +++ b/environments/gcr-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). +# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//credentials-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- +secrets: + mode: values +auth: + 
defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//credentials-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/gcr-argo/secrets.sops.example.yaml b/environments/gcr-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..7335b8d --- /dev/null +++ b/environments/gcr-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//credentials-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//credentials-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
+# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 5390e9152b7905b37d07c3aa21e3d38105906cca Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 22:49:11 +0530 Subject: [PATCH 62/79] Fix gcr-argo cluster endpoint --- argocd/customers/gcr-argo.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/argocd/customers/gcr-argo.yaml b/argocd/customers/gcr-argo.yaml index c31a647..8c96185 100644 --- a/argocd/customers/gcr-argo.yaml +++ b/argocd/customers/gcr-argo.yaml @@ -1,7 +1,7 @@ customer: gcr-argo environment: gcr-argo project: countly-customers -server: https://34.123.21.39 +server: https://34.16.108.212 gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com secretManagerProjectID: countly-tools clusterProjectID: countly-dev-313620 From 61ee52178f9358d970c9d33a3b41e224ca5a558f Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 23:13:25 +0530 Subject: [PATCH 63/79] Reduce gcr-argo Kafka Connect memory --- environments/gcr-argo/kafka.yaml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml index ff6fe5e..7f40191 100644 --- a/environments/gcr-argo/kafka.yaml +++ b/environments/gcr-argo/kafka.yaml @@ -1,2 +1,13 @@ # Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. +# Keep tier1 defaults intact for other customers; this cluster needs a smaller +# Kafka Connect footprint to fit on the current node pool. 
+ +kafkaConnect: + resources: + requests: + memory: "4Gi" + limits: + memory: "4Gi" + jvmOptions: + xms: "2g" + xmx: "2g" From a27ed2877903e7c2f651f574fba0eb17d439a4f4 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 23:22:36 +0530 Subject: [PATCH 64/79] Remove gcr-argo test customer --- argocd/customers/gcr-argo.yaml | 16 ---- environments/gcr-argo/README.md | 77 ----------------- environments/gcr-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/gcr-argo/countly-tls.env | 7 -- environments/gcr-argo/countly.yaml | 2 - .../gcr-argo/credentials-clickhouse.yaml | 10 --- .../gcr-argo/credentials-countly.yaml | 23 ----- environments/gcr-argo/credentials-kafka.yaml | 11 --- .../gcr-argo/credentials-migration.yaml | 2 - .../gcr-argo/credentials-mongodb.yaml | 21 ----- .../gcr-argo/credentials-observability.yaml | 2 - .../gcr-argo/external-secrets.example.yaml | 84 ------------------- environments/gcr-argo/global.yaml | 39 --------- .../gcr-argo/image-pull-secrets.example.yaml | 41 --------- environments/gcr-argo/kafka.yaml | 13 --- environments/gcr-argo/migration.yaml | 1 - environments/gcr-argo/mongodb.yaml | 2 - environments/gcr-argo/observability.yaml | 1 - environments/gcr-argo/secrets.example.yaml | 62 -------------- .../gcr-argo/secrets.sops.example.yaml | 21 ----- 21 files changed, 468 deletions(-) delete mode 100644 argocd/customers/gcr-argo.yaml delete mode 100644 environments/gcr-argo/README.md delete mode 100644 environments/gcr-argo/clickhouse.yaml delete mode 100644 environments/gcr-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/gcr-argo/countly-tls.env delete mode 100644 environments/gcr-argo/countly.yaml delete mode 100644 environments/gcr-argo/credentials-clickhouse.yaml delete mode 100644 environments/gcr-argo/credentials-countly.yaml delete mode 100644 environments/gcr-argo/credentials-kafka.yaml delete mode 100644 environments/gcr-argo/credentials-migration.yaml 
delete mode 100644 environments/gcr-argo/credentials-mongodb.yaml delete mode 100644 environments/gcr-argo/credentials-observability.yaml delete mode 100644 environments/gcr-argo/external-secrets.example.yaml delete mode 100644 environments/gcr-argo/global.yaml delete mode 100644 environments/gcr-argo/image-pull-secrets.example.yaml delete mode 100644 environments/gcr-argo/kafka.yaml delete mode 100644 environments/gcr-argo/migration.yaml delete mode 100644 environments/gcr-argo/mongodb.yaml delete mode 100644 environments/gcr-argo/observability.yaml delete mode 100644 environments/gcr-argo/secrets.example.yaml delete mode 100644 environments/gcr-argo/secrets.sops.example.yaml diff --git a/argocd/customers/gcr-argo.yaml b/argocd/customers/gcr-argo.yaml deleted file mode 100644 index 8c96185..0000000 --- a/argocd/customers/gcr-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: gcr-argo -environment: gcr-argo -project: countly-customers -server: https://34.16.108.212 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: countly-dev-313620 -clusterName: gcr-argo -clusterLocation: us-central1-a -hostname: gcr-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/environments/gcr-argo/README.md b/environments/gcr-argo/README.md deleted file mode 100644 index 65a5fb3..0000000 --- a/environments/gcr-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. 
Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
-- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/gcr-argo/clickhouse.yaml b/environments/gcr-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/gcr-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml b/environments/gcr-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/gcr-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. 
-# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/gcr-argo/countly-tls.env b/environments/gcr-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/gcr-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/gcr-argo/countly.yaml b/environments/gcr-argo/countly.yaml deleted file mode 100644 index b71d75e..0000000 --- a/environments/gcr-argo/countly.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
diff --git a/environments/gcr-argo/credentials-clickhouse.yaml b/environments/gcr-argo/credentials-clickhouse.yaml deleted file mode 100644 index eef6bee..0000000 --- a/environments/gcr-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: gcr-argo-clickhouse-default-user-password diff --git a/environments/gcr-argo/credentials-countly.yaml b/environments/gcr-argo/credentials-countly.yaml deleted file mode 100644 index 1f9bb8f..0000000 --- a/environments/gcr-argo/credentials-countly.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Countly secrets — FILL IN before first deploy -# Passwords must match across charts (see secrets.example.yaml) -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: gcr-argo-countly-encryption-reports-key - webSessionSecret: gcr-argo-countly-web-session-secret - passwordSecret: gcr-argo-countly-password-secret - clickhouse: - password: gcr-argo-countly-clickhouse-password - mongodb: - password: gcr-argo-countly-mongodb-password diff --git a/environments/gcr-argo/credentials-kafka.yaml b/environments/gcr-argo/credentials-kafka.yaml deleted file mode 100644 index da8bd9d..0000000 --- a/environments/gcr-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# Kafka secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: gcr-argo-kafka-connect-clickhouse-password diff --git 
a/environments/gcr-argo/credentials-migration.yaml b/environments/gcr-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/gcr-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. diff --git a/environments/gcr-argo/credentials-mongodb.yaml b/environments/gcr-argo/credentials-mongodb.yaml deleted file mode 100644 index 45f6e79..0000000 --- a/environments/gcr-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: gcr-argo-mongodb-admin-password - app: - password: gcr-argo-mongodb-app-password - metrics: - password: gcr-argo-mongodb-metrics-password - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/gcr-argo/credentials-observability.yaml b/environments/gcr-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/gcr-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/gcr-argo/external-secrets.example.yaml b/environments/gcr-argo/external-secrets.example.yaml deleted file mode 100644 index 35e59ad..0000000 --- a/environments/gcr-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under environments//: 
-# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# webSessionSecret: "acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-countly-mongodb-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. 
Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. diff --git a/environments/gcr-argo/global.yaml b/environments/gcr-argo/global.yaml deleted file mode 100644 index ab59ecf..0000000 --- a/environments/gcr-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: "gcp-secrets" - kind: ClusterSecretStore - remoteRef: - key: "gcr-argo-gar-dockerconfig" - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: gcr-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/gcr-argo/image-pull-secrets.example.yaml b/environments/gcr-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/gcr-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). 
-# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/gcr-argo/kafka.yaml b/environments/gcr-argo/kafka.yaml deleted file mode 100644 index 7f40191..0000000 --- a/environments/gcr-argo/kafka.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Customer-specific Kafka overrides only. -# Keep tier1 defaults intact for other customers; this cluster needs a smaller -# Kafka Connect footprint to fit on the current node pool. - -kafkaConnect: - resources: - requests: - memory: "4Gi" - limits: - memory: "4Gi" - jvmOptions: - xms: "2g" - xmx: "2g" diff --git a/environments/gcr-argo/migration.yaml b/environments/gcr-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/gcr-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. 
diff --git a/environments/gcr-argo/mongodb.yaml b/environments/gcr-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/gcr-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/gcr-argo/observability.yaml b/environments/gcr-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/gcr-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/gcr-argo/secrets.example.yaml b/environments/gcr-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/gcr-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). 
-# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: values -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/gcr-argo/secrets.sops.example.yaml b/environments/gcr-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/gcr-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS 
before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From e997fa4caed810e9a10c6734916db03c6078ae9d Mon Sep 17 00:00:00 2001 From: ihaardik Date: Wed, 1 Apr 2026 23:46:07 +0530 Subject: [PATCH 65/79] Harden customer scaffold and onboarding --- argocd/ONBOARDING.md | 18 ++++++++++++++++++ scripts/new-argocd-customer.sh | 10 ++++++++++ 2 files changed, 28 insertions(+) diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index d04b875..f066ee8 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -229,6 +229,24 @@ Set these carefully: - `kafkaConnect` - `migration` +Important for `server`: +- use the actual cluster API server URL Argo CD knows +- do not guess or paste a random external IP +- for GKE, the safest source is: + +```bash +gcloud container clusters describe \ + --zone \ + --project \ + --format="value(endpoint)" +``` + +Then use: + +```text +https:// +``` + Example: ```yaml diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index fe5ba74..7b58a94 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -51,6 +51,8 @@ if [[ -e "${customer_file}" ]]; then exit 1 fi +mkdir -p "$(dirname "${customer_file}")" + cp -R "${repo_root}/environments/reference" "${env_dir}" cat > "${env_dir}/global.yaml" < Date: Thu, 2 Apr 2026 11:27:45 +0530 Subject: [PATCH 66/79] Add div-ya-argo customer --- 
argocd/customers/div-ya-argo.yaml | 16 ++++ environments/div-ya-argo/README.md | 77 +++++++++++++++++ environments/div-ya-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 +++++++ environments/div-ya-argo/countly-tls.env | 7 ++ environments/div-ya-argo/countly.yaml | 2 + .../div-ya-argo/credentials-clickhouse.yaml | 10 +++ .../div-ya-argo/credentials-countly.yaml | 23 +++++ .../div-ya-argo/credentials-kafka.yaml | 11 +++ .../div-ya-argo/credentials-migration.yaml | 2 + .../div-ya-argo/credentials-mongodb.yaml | 21 +++++ .../credentials-observability.yaml | 2 + .../div-ya-argo/external-secrets.example.yaml | 84 +++++++++++++++++++ environments/div-ya-argo/global.yaml | 39 +++++++++ .../image-pull-secrets.example.yaml | 41 +++++++++ environments/div-ya-argo/kafka.yaml | 2 + environments/div-ya-argo/migration.yaml | 1 + environments/div-ya-argo/mongodb.yaml | 2 + environments/div-ya-argo/observability.yaml | 1 + environments/div-ya-argo/secrets.example.yaml | 62 ++++++++++++++ .../div-ya-argo/secrets.sops.example.yaml | 21 +++++ 21 files changed, 457 insertions(+) create mode 100644 argocd/customers/div-ya-argo.yaml create mode 100644 environments/div-ya-argo/README.md create mode 100644 environments/div-ya-argo/clickhouse.yaml create mode 100644 environments/div-ya-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/div-ya-argo/countly-tls.env create mode 100644 environments/div-ya-argo/countly.yaml create mode 100644 environments/div-ya-argo/credentials-clickhouse.yaml create mode 100644 environments/div-ya-argo/credentials-countly.yaml create mode 100644 environments/div-ya-argo/credentials-kafka.yaml create mode 100644 environments/div-ya-argo/credentials-migration.yaml create mode 100644 environments/div-ya-argo/credentials-mongodb.yaml create mode 100644 environments/div-ya-argo/credentials-observability.yaml create mode 100644 environments/div-ya-argo/external-secrets.example.yaml create mode 100644 
environments/div-ya-argo/global.yaml create mode 100644 environments/div-ya-argo/image-pull-secrets.example.yaml create mode 100644 environments/div-ya-argo/kafka.yaml create mode 100644 environments/div-ya-argo/migration.yaml create mode 100644 environments/div-ya-argo/mongodb.yaml create mode 100644 environments/div-ya-argo/observability.yaml create mode 100644 environments/div-ya-argo/secrets.example.yaml create mode 100644 environments/div-ya-argo/secrets.sops.example.yaml diff --git a/argocd/customers/div-ya-argo.yaml b/argocd/customers/div-ya-argo.yaml new file mode 100644 index 0000000..b80c8e4 --- /dev/null +++ b/argocd/customers/div-ya-argo.yaml @@ -0,0 +1,16 @@ +customer: div-ya-argo +environment: div-ya-argo +project: countly-customers +server: https://35.193.84.128 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: div-ya-argo +clusterLocation: us-central1-a +hostname: div-ya-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/div-ya-argo/README.md b/environments/div-ya-argo/README.md new file mode 100644 index 0000000..65a5fb3 --- /dev/null +++ b/environments/div-ya-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. Fill in required secrets in the chart-specific files: + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/div-ya-argo/clickhouse.yaml b/environments/div-ya-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/div-ya-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/div-ya-argo/cluster-secret-store.gcp.example.yaml b/environments/div-ya-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/div-ya-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. 
+# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/div-ya-argo/countly-tls.env b/environments/div-ya-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/div-ya-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline at end of file diff --git a/environments/div-ya-argo/countly.yaml b/environments/div-ya-argo/countly.yaml new file mode 100644 index 0000000..b71d75e --- /dev/null +++ b/environments/div-ya-argo/countly.yaml @@ -0,0 +1,2 @@ +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
diff --git a/environments/div-ya-argo/credentials-clickhouse.yaml b/environments/div-ya-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..4dcec9e --- /dev/null +++ b/environments/div-ya-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +# ClickHouse secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: div-ya-argo-clickhouse-default-user-password diff --git a/environments/div-ya-argo/credentials-countly.yaml b/environments/div-ya-argo/credentials-countly.yaml new file mode 100644 index 0000000..3cd21d4 --- /dev/null +++ b/environments/div-ya-argo/credentials-countly.yaml @@ -0,0 +1,23 @@ +# Countly secrets — FILL IN before first deploy +# Passwords must match across charts (see secrets.example.yaml) +secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: div-ya-argo-countly-encryption-reports-key + webSessionSecret: div-ya-argo-countly-web-session-secret + passwordSecret: div-ya-argo-countly-password-secret + clickhouse: + password: div-ya-argo-countly-clickhouse-password + mongodb: + password: div-ya-argo-countly-mongodb-password diff --git a/environments/div-ya-argo/credentials-kafka.yaml b/environments/div-ya-argo/credentials-kafka.yaml new file mode 100644 index 0000000..eaf32b2 --- /dev/null +++ b/environments/div-ya-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +# Kafka secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: div-ya-argo-kafka-connect-clickhouse-password diff --git 
a/environments/div-ya-argo/credentials-migration.yaml b/environments/div-ya-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/div-ya-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. diff --git a/environments/div-ya-argo/credentials-mongodb.yaml b/environments/div-ya-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..f0361b6 --- /dev/null +++ b/environments/div-ya-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +# MongoDB secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: div-ya-argo-mongodb-admin-password + app: + password: div-ya-argo-mongodb-app-password + metrics: + password: div-ya-argo-mongodb-metrics-password + +users: + admin: + enabled: true + metrics: + enabled: true diff --git a/environments/div-ya-argo/credentials-observability.yaml b/environments/div-ya-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/div-ya-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/div-ya-argo/external-secrets.example.yaml b/environments/div-ya-argo/external-secrets.example.yaml new file mode 100644 index 0000000..35e59ad --- /dev/null +++ b/environments/div-ya-argo/external-secrets.example.yaml @@ -0,0 +1,84 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific 
secrets files under environments//: +# +# environments//credentials-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-countly-mongodb-password" +# +# environments//credentials-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//credentials-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//credentials-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. 
Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. diff --git a/environments/div-ya-argo/global.yaml b/environments/div-ya-argo/global.yaml new file mode 100644 index 0000000..6517296 --- /dev/null +++ b/environments/div-ya-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: "gcp-secrets" + kind: ClusterSecretStore + remoteRef: + key: "customers-gcr-argo-gar-dockerconfig" + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: div-ya-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/div-ya-argo/image-pull-secrets.example.yaml b/environments/div-ya-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/div-ya-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). 
+# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/div-ya-argo/kafka.yaml b/environments/div-ya-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/div-ya-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/div-ya-argo/migration.yaml b/environments/div-ya-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/div-ya-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/div-ya-argo/mongodb.yaml b/environments/div-ya-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/div-ya-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/div-ya-argo/observability.yaml b/environments/div-ya-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/div-ya-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. diff --git a/environments/div-ya-argo/secrets.example.yaml b/environments/div-ya-argo/secrets.example.yaml new file mode 100644 index 0000000..181cb54 --- /dev/null +++ b/environments/div-ya-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). +# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments/<customer>/credentials-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments/<customer>/credentials-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments/<customer>/credentials-clickhouse.yaml) --- +secrets: + mode: 
values +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments/<customer>/credentials-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/div-ya-argo/secrets.sops.example.yaml b/environments/div-ya-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..7335b8d --- /dev/null +++ b/environments/div-ya-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments/<customer>/credentials-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments/<customer>/credentials-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
+# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 43f35526bc273aaa82440c8ac3c7bcb879231676 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 11:47:15 +0530 Subject: [PATCH 67/79] Escape MongoDB passwords in Countly URIs --- charts/countly/templates/_helpers.tpl | 14 ++++++++++++-- .../countly/templates/external-secret-mongodb.yaml | 2 +- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/charts/countly/templates/_helpers.tpl b/charts/countly/templates/_helpers.tpl index bb4521e..3d6b800 100644 --- a/charts/countly/templates/_helpers.tpl +++ b/charts/countly/templates/_helpers.tpl @@ -119,6 +119,16 @@ Effective TLS secret name. {{- ((.Values.ingress).tls).secretName | default (printf "%s-tls" (include "countly.fullname" .)) -}} {{- end -}} +{{/* +Escape MongoDB URI user-info values safely. +urlquery handles reserved characters but encodes spaces as "+", which is +query-style encoding. Replace "+" with "%20" so the result is safe in URI +user-info segments too. +*/}} +{{- define "countly.mongodb.escapeUserInfo" -}} +{{- . | urlquery | replace "+" "%20" -}} +{{- end -}} + {{/* MongoDB connection string computation. Reads from backingServices.mongodb; constructs from service DNS if not provided. @@ -138,7 +148,7 @@ Reads from backingServices.mongodb; constructs from service DNS if not provided. 
{{- $user := $bs.username | default "app" -}} {{- $db := $bs.database | default "admin" -}} {{- $rs := $bs.replicaSet | default (printf "%s-mongodb" .Release.Name) -}} -mongodb://{{ $user }}:{{ $pass }}@{{ $host }}:{{ $port }}/{{ $db }}?replicaSet={{ $rs }}&ssl=false +mongodb://{{ include "countly.mongodb.escapeUserInfo" $user }}:{{ include "countly.mongodb.escapeUserInfo" $pass }}@{{ $host }}:{{ $port }}/{{ $db }}?replicaSet={{ $rs }}&ssl=false {{- end -}} {{- end -}} @@ -162,7 +172,7 @@ Used by ExternalSecret templates where the password may come from the secret bac {{- $user := $bs.username | default "app" -}} {{- $db := $bs.database | default "admin" -}} {{- $rs := $bs.replicaSet | default (printf "%s-mongodb" $root.Release.Name) -}} -mongodb://{{ $user }}:{{ $pass }}@{{ $host }}:{{ $port }}/{{ $db }}?replicaSet={{ $rs }}&ssl=false +mongodb://{{ include "countly.mongodb.escapeUserInfo" $user }}:{{ $pass }}@{{ $host }}:{{ $port }}/{{ $db }}?replicaSet={{ $rs }}&ssl=false {{- end -}} {{- end -}} diff --git a/charts/countly/templates/external-secret-mongodb.yaml b/charts/countly/templates/external-secret-mongodb.yaml index ddd1443..d66c21d 100644 --- a/charts/countly/templates/external-secret-mongodb.yaml +++ b/charts/countly/templates/external-secret-mongodb.yaml @@ -23,7 +23,7 @@ spec: data: {{- if .Values.secrets.externalSecret.remoteRefs.mongodb.connectionString }} {{- else if .Values.secrets.externalSecret.remoteRefs.mongodb.password }} - {{ .Values.secrets.mongodb.key | default "connectionString.standard" }}: {{ include "countly.mongodb.connectionStringWithPassword" (dict "root" . "password" "{{ .mongodbPassword }}") | quote }} + {{ .Values.secrets.mongodb.key | default "connectionString.standard" }}: {{ include "countly.mongodb.connectionStringWithPassword" (dict "root" . 
"password" "{{ .mongodbPassword | urlquery | replace \"+\" \"%20\" }}" ) | quote }} {{- else }} {{ .Values.secrets.mongodb.key | default "connectionString.standard" }}: {{ include "countly.mongodb.connectionString" . | quote }} {{- end }} From 00eebba9075d55574afdaa58934686e67a4ddb0b Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 12:26:41 +0530 Subject: [PATCH 68/79] Add hardik-argo customer --- argocd/customers/hardik-argo.yaml | 16 ++++ environments/hardik-argo/README.md | 77 +++++++++++++++++ environments/hardik-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 +++++++ environments/hardik-argo/countly-tls.env | 7 ++ environments/hardik-argo/countly.yaml | 2 + .../hardik-argo/credentials-clickhouse.yaml | 10 +++ .../hardik-argo/credentials-countly.yaml | 22 +++++ .../hardik-argo/credentials-kafka.yaml | 11 +++ .../hardik-argo/credentials-migration.yaml | 2 + .../hardik-argo/credentials-mongodb.yaml | 21 +++++ .../credentials-observability.yaml | 2 + .../hardik-argo/external-secrets.example.yaml | 84 +++++++++++++++++++ environments/hardik-argo/global.yaml | 39 +++++++++ .../image-pull-secrets.example.yaml | 41 +++++++++ environments/hardik-argo/kafka.yaml | 2 + environments/hardik-argo/migration.yaml | 1 + environments/hardik-argo/mongodb.yaml | 2 + environments/hardik-argo/observability.yaml | 1 + environments/hardik-argo/secrets.example.yaml | 62 ++++++++++++++ .../hardik-argo/secrets.sops.example.yaml | 21 +++++ 21 files changed, 456 insertions(+) create mode 100644 argocd/customers/hardik-argo.yaml create mode 100644 environments/hardik-argo/README.md create mode 100644 environments/hardik-argo/clickhouse.yaml create mode 100644 environments/hardik-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/hardik-argo/countly-tls.env create mode 100644 environments/hardik-argo/countly.yaml create mode 100644 environments/hardik-argo/credentials-clickhouse.yaml create mode 100644 
environments/hardik-argo/credentials-countly.yaml create mode 100644 environments/hardik-argo/credentials-kafka.yaml create mode 100644 environments/hardik-argo/credentials-migration.yaml create mode 100644 environments/hardik-argo/credentials-mongodb.yaml create mode 100644 environments/hardik-argo/credentials-observability.yaml create mode 100644 environments/hardik-argo/external-secrets.example.yaml create mode 100644 environments/hardik-argo/global.yaml create mode 100644 environments/hardik-argo/image-pull-secrets.example.yaml create mode 100644 environments/hardik-argo/kafka.yaml create mode 100644 environments/hardik-argo/migration.yaml create mode 100644 environments/hardik-argo/mongodb.yaml create mode 100644 environments/hardik-argo/observability.yaml create mode 100644 environments/hardik-argo/secrets.example.yaml create mode 100644 environments/hardik-argo/secrets.sops.example.yaml diff --git a/argocd/customers/hardik-argo.yaml b/argocd/customers/hardik-argo.yaml new file mode 100644 index 0000000..c246351 --- /dev/null +++ b/argocd/customers/hardik-argo.yaml @@ -0,0 +1,16 @@ +customer: hardik-argo +environment: hardik-argo +project: countly-customers +server: https://34.69.69.19 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: hardik-argo +clusterLocation: us-central1-a +hostname: hardik-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/hardik-argo/README.md b/environments/hardik-argo/README.md new file mode 100644 index 0000000..65a5fb3 --- /dev/null +++ b/environments/hardik-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. Fill in required secrets in the chart-specific files: + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/hardik-argo/clickhouse.yaml b/environments/hardik-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/hardik-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/hardik-argo/cluster-secret-store.gcp.example.yaml b/environments/hardik-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/hardik-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. 
+# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/hardik-argo/countly-tls.env b/environments/hardik-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/hardik-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline at end of file diff --git a/environments/hardik-argo/countly.yaml b/environments/hardik-argo/countly.yaml new file mode 100644 index 0000000..b71d75e --- /dev/null +++ b/environments/hardik-argo/countly.yaml @@ -0,0 +1,2 @@ +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. 
diff --git a/environments/hardik-argo/credentials-clickhouse.yaml b/environments/hardik-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..ea08f89 --- /dev/null +++ b/environments/hardik-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +# ClickHouse secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: hardik-argo-clickhouse-default-user-password diff --git a/environments/hardik-argo/credentials-countly.yaml b/environments/hardik-argo/credentials-countly.yaml new file mode 100644 index 0000000..be19a22 --- /dev/null +++ b/environments/hardik-argo/credentials-countly.yaml @@ -0,0 +1,22 @@ +# Countly secrets — FILL IN before first deploy +secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: hardik-argo-countly-encryption-reports-key + webSessionSecret: hardik-argo-countly-web-session-secret + passwordSecret: hardik-argo-countly-password-secret + clickhouse: + password: hardik-argo-countly-clickhouse-password + mongodb: + password: hardik-argo-countly-mongodb-password diff --git a/environments/hardik-argo/credentials-kafka.yaml b/environments/hardik-argo/credentials-kafka.yaml new file mode 100644 index 0000000..e2851bb --- /dev/null +++ b/environments/hardik-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +# Kafka secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: hardik-argo-kafka-connect-clickhouse-password diff --git a/environments/hardik-argo/credentials-migration.yaml 
b/environments/hardik-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/hardik-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. diff --git a/environments/hardik-argo/credentials-mongodb.yaml b/environments/hardik-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..cddfb25 --- /dev/null +++ b/environments/hardik-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +# MongoDB secrets — FILL IN before first deploy +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: hardik-argo-mongodb-admin-password + app: + password: hardik-argo-mongodb-app-password + metrics: + password: hardik-argo-mongodb-metrics-password + +users: + admin: + enabled: true + metrics: + enabled: true diff --git a/environments/hardik-argo/credentials-observability.yaml b/environments/hardik-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/hardik-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/hardik-argo/external-secrets.example.yaml b/environments/hardik-argo/external-secrets.example.yaml new file mode 100644 index 0000000..35e59ad --- /dev/null +++ b/environments/hardik-argo/external-secrets.example.yaml @@ -0,0 +1,84 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific secrets files under environments/<customer>/: +# +# 
environments/<customer>/credentials-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-countly-mongodb-password" +# +# environments/<customer>/credentials-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments/<customer>/credentials-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments/<customer>/credentials-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments/<customer>/global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. 
Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. diff --git a/environments/hardik-argo/global.yaml b/environments/hardik-argo/global.yaml new file mode 100644 index 0000000..d113c12 --- /dev/null +++ b/environments/hardik-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: "gcp-secrets" + kind: ClusterSecretStore + remoteRef: + key: "customers-gcr-argo-gar-dockerconfig" + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: hardik-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/hardik-argo/image-pull-secrets.example.yaml b/environments/hardik-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/hardik-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). 
+# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/hardik-argo/kafka.yaml b/environments/hardik-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/hardik-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/hardik-argo/migration.yaml b/environments/hardik-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/hardik-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/hardik-argo/mongodb.yaml b/environments/hardik-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/hardik-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/hardik-argo/observability.yaml b/environments/hardik-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/hardik-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. diff --git a/environments/hardik-argo/secrets.example.yaml b/environments/hardik-argo/secrets.example.yaml new file mode 100644 index 0000000..181cb54 --- /dev/null +++ b/environments/hardik-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). +# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments/<customer>/credentials-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments/<customer>/credentials-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments/<customer>/credentials-clickhouse.yaml) --- +secrets: + mode: 
values +auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments/<customer>/credentials-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/hardik-argo/secrets.sops.example.yaml b/environments/hardik-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..7335b8d --- /dev/null +++ b/environments/hardik-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments/<customer>/credentials-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments/<customer>/credentials-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
+# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From f4a2052da8c16318c5e62692dc73a77f0de1095c Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 13:03:25 +0530 Subject: [PATCH 69/79] Reuse MongoDB app secret across charts --- argocd/ONBOARDING.md | 12 +++++++++--- argocd/README.md | 8 +++++++- docs/SECRET-MANAGEMENT.md | 5 ++--- environments/div-ya-argo/credentials-countly.yaml | 2 +- .../div-ya-argo/external-secrets.example.yaml | 2 +- environments/hardik-argo/credentials-countly.yaml | 2 +- .../hardik-argo/external-secrets.example.yaml | 2 +- environments/reference/credentials-countly.yaml | 4 ++-- environments/reference/credentials-mongodb.yaml | 2 +- environments/reference/external-secrets.example.yaml | 2 +- 10 files changed, 26 insertions(+), 15 deletions(-) diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index f066ee8..3009358 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -68,7 +68,6 @@ northstar-countly-encryption-reports-key northstar-countly-web-session-secret northstar-countly-password-secret northstar-countly-clickhouse-password -northstar-countly-mongodb-password northstar-kafka-connect-clickhouse-password northstar-clickhouse-default-user-password northstar-mongodb-admin-password @@ -589,7 +588,7 @@ northstar-countly-encryption-reports-key northstar-countly-web-session-secret northstar-countly-password-secret northstar-countly-clickhouse-password -northstar-countly-mongodb-password +northstar-mongodb-app-password northstar-kafka-connect-clickhouse-password northstar-clickhouse-default-user-password northstar-mongodb-admin-password @@ -633,7 +632,7 @@ secrets: clickhouse: password: northstar-countly-clickhouse-password mongodb: - password: northstar-countly-mongodb-password + password: northstar-mongodb-app-password clickhouse: username: default database: countly_drill @@ -695,6 +694,13 @@ 
secrets: password: northstar-mongodb-metrics-password ``` +Important: +- Countly and MongoDB `app` must use the same password +- for new customers, reuse the same Secret Manager key in both files: + - `credentials-countly.yaml` + - `credentials-mongodb.yaml` +- use `-mongodb-app-password` for both + ## Step 8: Commit And Sync Commit: diff --git a/argocd/README.md b/argocd/README.md index b8a9f6e..a1831fd 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -161,13 +161,19 @@ Recommended secret names: - `-countly-web-session-secret` - `-countly-password-secret` - `-countly-clickhouse-password` -- `-countly-mongodb-password` - `-kafka-connect-clickhouse-password` - `-clickhouse-default-user-password` - `-mongodb-admin-password` - `-mongodb-app-password` - `-mongodb-metrics-password` +Use the same Secret Manager key for: +- Countly MongoDB password +- MongoDB `app` user password + +That means new customers should point both charts at: +- `-mongodb-app-password` + Note: - existing customer environments may still use older secret names - use the new convention for all new customers diff --git a/docs/SECRET-MANAGEMENT.md b/docs/SECRET-MANAGEMENT.md index 89dba18..aaae835 100644 --- a/docs/SECRET-MANAGEMENT.md +++ b/docs/SECRET-MANAGEMENT.md @@ -60,7 +60,7 @@ secrets: clickhouse: password: "acme-countly-clickhouse-password" mongodb: - password: "acme-countly-mongodb-password" + password: "acme-mongodb-app-password" ``` Recommended naming convention: @@ -69,7 +69,6 @@ Recommended naming convention: - `-countly-web-session-secret` - `-countly-password-secret` - `-countly-clickhouse-password` -- `-countly-mongodb-password` - `-kafka-connect-clickhouse-password` - `-clickhouse-default-user-password` - `-mongodb-admin-password` @@ -86,7 +85,7 @@ All secrets are required on first install. 
On upgrades, existing values are pres | countly | common | webSessionSecret | Session cookie signing (min 8 chars) | | countly | common | passwordSecret | Password hashing (min 8 chars) | | countly | clickhouse | password | ClickHouse default user auth | -| countly | mongodb | password | MongoDB app user auth | +| countly | mongodb | password | MongoDB app user auth, reuse the same GSM key as `countly-mongodb.users.app.password` | | countly-mongodb | users.app | password | Must match countly secrets.mongodb.password | | countly-mongodb | users.metrics | password | Prometheus exporter auth | | countly-clickhouse | auth.defaultUserPassword | password | Must match countly secrets.clickhouse.password | diff --git a/environments/div-ya-argo/credentials-countly.yaml b/environments/div-ya-argo/credentials-countly.yaml index 3cd21d4..298c9bf 100644 --- a/environments/div-ya-argo/credentials-countly.yaml +++ b/environments/div-ya-argo/credentials-countly.yaml @@ -20,4 +20,4 @@ secrets: clickhouse: password: div-ya-argo-countly-clickhouse-password mongodb: - password: div-ya-argo-countly-mongodb-password + password: div-ya-argo-mongodb-app-password diff --git a/environments/div-ya-argo/external-secrets.example.yaml b/environments/div-ya-argo/external-secrets.example.yaml index 35e59ad..ca3b53a 100644 --- a/environments/div-ya-argo/external-secrets.example.yaml +++ b/environments/div-ya-argo/external-secrets.example.yaml @@ -20,7 +20,7 @@ # clickhouse: # password: "acme-countly-clickhouse-password" # mongodb: -# password: "acme-countly-mongodb-password" +# password: "acme-mongodb-app-password" # # environments//credentials-clickhouse.yaml # secrets: diff --git a/environments/hardik-argo/credentials-countly.yaml b/environments/hardik-argo/credentials-countly.yaml index be19a22..a06ea2b 100644 --- a/environments/hardik-argo/credentials-countly.yaml +++ b/environments/hardik-argo/credentials-countly.yaml @@ -19,4 +19,4 @@ secrets: clickhouse: password: 
hardik-argo-countly-clickhouse-password mongodb: - password: hardik-argo-countly-mongodb-password + password: hardik-argo-mongodb-app-password diff --git a/environments/hardik-argo/external-secrets.example.yaml b/environments/hardik-argo/external-secrets.example.yaml index 35e59ad..ca3b53a 100644 --- a/environments/hardik-argo/external-secrets.example.yaml +++ b/environments/hardik-argo/external-secrets.example.yaml @@ -20,7 +20,7 @@ # clickhouse: # password: "acme-countly-clickhouse-password" # mongodb: -# password: "acme-countly-mongodb-password" +# password: "acme-mongodb-app-password" # # environments//credentials-clickhouse.yaml # secrets: diff --git a/environments/reference/credentials-countly.yaml b/environments/reference/credentials-countly.yaml index abe6fb5..23c147e 100644 --- a/environments/reference/credentials-countly.yaml +++ b/environments/reference/credentials-countly.yaml @@ -13,7 +13,7 @@ secrets: kafka: securityProtocol: "PLAINTEXT" mongodb: - password: "" # REQUIRED: must match credentials-mongodb.yaml + password: "" # REQUIRED: must match credentials-mongodb.yaml users.app.password # For Secret Manager / External Secrets instead of direct values: # secrets: @@ -31,4 +31,4 @@ secrets: # clickhouse: # password: "acme-countly-clickhouse-password" # mongodb: -# password: "acme-countly-mongodb-password" +# password: "acme-mongodb-app-password" diff --git a/environments/reference/credentials-mongodb.yaml b/environments/reference/credentials-mongodb.yaml index 69b9ff4..0aef2b3 100644 --- a/environments/reference/credentials-mongodb.yaml +++ b/environments/reference/credentials-mongodb.yaml @@ -24,6 +24,6 @@ users: # admin: # password: "acme-mongodb-admin-password" # app: -# password: "acme-mongodb-app-password" +# password: "acme-mongodb-app-password" # Reuse this same GSM secret in credentials-countly.yaml # metrics: # password: "acme-mongodb-metrics-password" diff --git a/environments/reference/external-secrets.example.yaml 
b/environments/reference/external-secrets.example.yaml index 35e59ad..ca3b53a 100644 --- a/environments/reference/external-secrets.example.yaml +++ b/environments/reference/external-secrets.example.yaml @@ -20,7 +20,7 @@ # clickhouse: # password: "acme-countly-clickhouse-password" # mongodb: -# password: "acme-countly-mongodb-password" +# password: "acme-mongodb-app-password" # # environments//credentials-clickhouse.yaml # secrets: From f8ebe0dedde61cfa1687de2cb88a70506b4b744c Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 13:06:02 +0530 Subject: [PATCH 70/79] Remove test customers --- argocd/customers/div-ya-argo.yaml | 16 ---- argocd/customers/hardik-argo.yaml | 16 ---- environments/div-ya-argo/README.md | 77 ----------------- environments/div-ya-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/div-ya-argo/countly-tls.env | 7 -- environments/div-ya-argo/countly.yaml | 2 - .../div-ya-argo/credentials-clickhouse.yaml | 10 --- .../div-ya-argo/credentials-countly.yaml | 23 ----- .../div-ya-argo/credentials-kafka.yaml | 11 --- .../div-ya-argo/credentials-migration.yaml | 2 - .../div-ya-argo/credentials-mongodb.yaml | 21 ----- .../credentials-observability.yaml | 2 - .../div-ya-argo/external-secrets.example.yaml | 84 ------------------- environments/div-ya-argo/global.yaml | 39 --------- .../image-pull-secrets.example.yaml | 41 --------- environments/div-ya-argo/kafka.yaml | 2 - environments/div-ya-argo/migration.yaml | 1 - environments/div-ya-argo/mongodb.yaml | 2 - environments/div-ya-argo/observability.yaml | 1 - environments/div-ya-argo/secrets.example.yaml | 62 -------------- .../div-ya-argo/secrets.sops.example.yaml | 21 ----- environments/hardik-argo/README.md | 77 ----------------- environments/hardik-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/hardik-argo/countly-tls.env | 7 -- environments/hardik-argo/countly.yaml | 2 - 
.../hardik-argo/credentials-clickhouse.yaml | 10 --- .../hardik-argo/credentials-countly.yaml | 22 ----- .../hardik-argo/credentials-kafka.yaml | 11 --- .../hardik-argo/credentials-migration.yaml | 2 - .../hardik-argo/credentials-mongodb.yaml | 21 ----- .../credentials-observability.yaml | 2 - .../hardik-argo/external-secrets.example.yaml | 84 ------------------- environments/hardik-argo/global.yaml | 39 --------- .../image-pull-secrets.example.yaml | 41 --------- environments/hardik-argo/kafka.yaml | 2 - environments/hardik-argo/migration.yaml | 1 - environments/hardik-argo/mongodb.yaml | 2 - environments/hardik-argo/observability.yaml | 1 - environments/hardik-argo/secrets.example.yaml | 62 -------------- .../hardik-argo/secrets.sops.example.yaml | 21 ----- 42 files changed, 913 deletions(-) delete mode 100644 argocd/customers/div-ya-argo.yaml delete mode 100644 argocd/customers/hardik-argo.yaml delete mode 100644 environments/div-ya-argo/README.md delete mode 100644 environments/div-ya-argo/clickhouse.yaml delete mode 100644 environments/div-ya-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/div-ya-argo/countly-tls.env delete mode 100644 environments/div-ya-argo/countly.yaml delete mode 100644 environments/div-ya-argo/credentials-clickhouse.yaml delete mode 100644 environments/div-ya-argo/credentials-countly.yaml delete mode 100644 environments/div-ya-argo/credentials-kafka.yaml delete mode 100644 environments/div-ya-argo/credentials-migration.yaml delete mode 100644 environments/div-ya-argo/credentials-mongodb.yaml delete mode 100644 environments/div-ya-argo/credentials-observability.yaml delete mode 100644 environments/div-ya-argo/external-secrets.example.yaml delete mode 100644 environments/div-ya-argo/global.yaml delete mode 100644 environments/div-ya-argo/image-pull-secrets.example.yaml delete mode 100644 environments/div-ya-argo/kafka.yaml delete mode 100644 environments/div-ya-argo/migration.yaml delete mode 100644 
environments/div-ya-argo/mongodb.yaml delete mode 100644 environments/div-ya-argo/observability.yaml delete mode 100644 environments/div-ya-argo/secrets.example.yaml delete mode 100644 environments/div-ya-argo/secrets.sops.example.yaml delete mode 100644 environments/hardik-argo/README.md delete mode 100644 environments/hardik-argo/clickhouse.yaml delete mode 100644 environments/hardik-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/hardik-argo/countly-tls.env delete mode 100644 environments/hardik-argo/countly.yaml delete mode 100644 environments/hardik-argo/credentials-clickhouse.yaml delete mode 100644 environments/hardik-argo/credentials-countly.yaml delete mode 100644 environments/hardik-argo/credentials-kafka.yaml delete mode 100644 environments/hardik-argo/credentials-migration.yaml delete mode 100644 environments/hardik-argo/credentials-mongodb.yaml delete mode 100644 environments/hardik-argo/credentials-observability.yaml delete mode 100644 environments/hardik-argo/external-secrets.example.yaml delete mode 100644 environments/hardik-argo/global.yaml delete mode 100644 environments/hardik-argo/image-pull-secrets.example.yaml delete mode 100644 environments/hardik-argo/kafka.yaml delete mode 100644 environments/hardik-argo/migration.yaml delete mode 100644 environments/hardik-argo/mongodb.yaml delete mode 100644 environments/hardik-argo/observability.yaml delete mode 100644 environments/hardik-argo/secrets.example.yaml delete mode 100644 environments/hardik-argo/secrets.sops.example.yaml diff --git a/argocd/customers/div-ya-argo.yaml b/argocd/customers/div-ya-argo.yaml deleted file mode 100644 index b80c8e4..0000000 --- a/argocd/customers/div-ya-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: div-ya-argo -environment: div-ya-argo -project: countly-customers -server: https://35.193.84.128 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: 
countly-dev-313620 -clusterName: div-ya-argo -clusterLocation: us-central1-a -hostname: div-ya-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/argocd/customers/hardik-argo.yaml b/argocd/customers/hardik-argo.yaml deleted file mode 100644 index c246351..0000000 --- a/argocd/customers/hardik-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: hardik-argo -environment: hardik-argo -project: countly-customers -server: https://34.69.69.19 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: countly-dev-313620 -clusterName: hardik-argo -clusterLocation: us-central1-a -hostname: hardik-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/environments/div-ya-argo/README.md b/environments/div-ya-argo/README.md deleted file mode 100644 index 65a5fb3..0000000 --- a/environments/div-ya-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. 
Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) -- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. 
- -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/div-ya-argo/clickhouse.yaml b/environments/div-ya-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/div-ya-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/div-ya-argo/cluster-secret-store.gcp.example.yaml b/environments/div-ya-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/div-ya-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. -# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/div-ya-argo/countly-tls.env b/environments/div-ya-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/div-ya-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/div-ya-argo/countly.yaml b/environments/div-ya-argo/countly.yaml deleted file mode 100644 index b71d75e..0000000 --- 
a/environments/div-ya-argo/countly.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. diff --git a/environments/div-ya-argo/credentials-clickhouse.yaml b/environments/div-ya-argo/credentials-clickhouse.yaml deleted file mode 100644 index 4dcec9e..0000000 --- a/environments/div-ya-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: div-ya-argo-clickhouse-default-user-password diff --git a/environments/div-ya-argo/credentials-countly.yaml b/environments/div-ya-argo/credentials-countly.yaml deleted file mode 100644 index 298c9bf..0000000 --- a/environments/div-ya-argo/credentials-countly.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# Countly secrets — FILL IN before first deploy -# Passwords must match across charts (see secrets.example.yaml) -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: div-ya-argo-countly-encryption-reports-key - webSessionSecret: div-ya-argo-countly-web-session-secret - passwordSecret: div-ya-argo-countly-password-secret - clickhouse: - password: div-ya-argo-countly-clickhouse-password - mongodb: - password: div-ya-argo-mongodb-app-password diff --git a/environments/div-ya-argo/credentials-kafka.yaml b/environments/div-ya-argo/credentials-kafka.yaml deleted file mode 100644 index eaf32b2..0000000 --- a/environments/div-ya-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# Kafka secrets — FILL IN before first deploy -secrets: - mode: 
externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: div-ya-argo-kafka-connect-clickhouse-password diff --git a/environments/div-ya-argo/credentials-migration.yaml b/environments/div-ya-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/div-ya-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. diff --git a/environments/div-ya-argo/credentials-mongodb.yaml b/environments/div-ya-argo/credentials-mongodb.yaml deleted file mode 100644 index f0361b6..0000000 --- a/environments/div-ya-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: div-ya-argo-mongodb-admin-password - app: - password: div-ya-argo-mongodb-app-password - metrics: - password: div-ya-argo-mongodb-metrics-password - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/div-ya-argo/credentials-observability.yaml b/environments/div-ya-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/div-ya-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/div-ya-argo/external-secrets.example.yaml b/environments/div-ya-argo/external-secrets.example.yaml deleted file mode 100644 index ca3b53a..0000000 --- a/environments/div-ya-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# 
External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# webSessionSecret: "acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-mongodb-app-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: 
"acme-gar-dockerconfig" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. diff --git a/environments/div-ya-argo/global.yaml b/environments/div-ya-argo/global.yaml deleted file mode 100644 index 6517296..0000000 --- a/environments/div-ya-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: "gcp-secrets" - kind: ClusterSecretStore - remoteRef: - key: "customers-gcr-argo-gar-dockerconfig" - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: div-ya-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/div-ya-argo/image-pull-secrets.example.yaml b/environments/div-ya-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/div-ya-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE 
WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). -# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/div-ya-argo/kafka.yaml b/environments/div-ya-argo/kafka.yaml deleted file mode 100644 index ff6fe5e..0000000 --- a/environments/div-ya-argo/kafka.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/div-ya-argo/migration.yaml b/environments/div-ya-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/div-ya-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. 
diff --git a/environments/div-ya-argo/mongodb.yaml b/environments/div-ya-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/div-ya-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/div-ya-argo/observability.yaml b/environments/div-ya-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/div-ya-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/div-ya-argo/secrets.example.yaml b/environments/div-ya-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/div-ya-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). 
-# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: values -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/div-ya-argo/secrets.sops.example.yaml b/environments/div-ya-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/div-ya-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with 
SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/environments/hardik-argo/README.md b/environments/hardik-argo/README.md deleted file mode 100644 index 65a5fb3..0000000 --- a/environments/hardik-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. 
Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) -- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. 
- -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/hardik-argo/clickhouse.yaml b/environments/hardik-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/hardik-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/hardik-argo/cluster-secret-store.gcp.example.yaml b/environments/hardik-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/hardik-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. -# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/hardik-argo/countly-tls.env b/environments/hardik-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/hardik-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/hardik-argo/countly.yaml b/environments/hardik-argo/countly.yaml deleted file mode 100644 index b71d75e..0000000 --- 
a/environments/hardik-argo/countly.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. diff --git a/environments/hardik-argo/credentials-clickhouse.yaml b/environments/hardik-argo/credentials-clickhouse.yaml deleted file mode 100644 index ea08f89..0000000 --- a/environments/hardik-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: hardik-argo-clickhouse-default-user-password diff --git a/environments/hardik-argo/credentials-countly.yaml b/environments/hardik-argo/credentials-countly.yaml deleted file mode 100644 index a06ea2b..0000000 --- a/environments/hardik-argo/credentials-countly.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Countly secrets — FILL IN before first deploy -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: hardik-argo-countly-encryption-reports-key - webSessionSecret: hardik-argo-countly-web-session-secret - passwordSecret: hardik-argo-countly-password-secret - clickhouse: - password: hardik-argo-countly-clickhouse-password - mongodb: - password: hardik-argo-mongodb-app-password diff --git a/environments/hardik-argo/credentials-kafka.yaml b/environments/hardik-argo/credentials-kafka.yaml deleted file mode 100644 index e2851bb..0000000 --- a/environments/hardik-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# Kafka secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - 
name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: hardik-argo-kafka-connect-clickhouse-password diff --git a/environments/hardik-argo/credentials-migration.yaml b/environments/hardik-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/hardik-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. diff --git a/environments/hardik-argo/credentials-mongodb.yaml b/environments/hardik-argo/credentials-mongodb.yaml deleted file mode 100644 index cddfb25..0000000 --- a/environments/hardik-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets — FILL IN before first deploy -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: hardik-argo-mongodb-admin-password - app: - password: hardik-argo-mongodb-app-password - metrics: - password: hardik-argo-mongodb-metrics-password - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/hardik-argo/credentials-observability.yaml b/environments/hardik-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/hardik-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/hardik-argo/external-secrets.example.yaml b/environments/hardik-argo/external-secrets.example.yaml deleted file mode 100644 index ca3b53a..0000000 --- a/environments/hardik-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# 
============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# webSessionSecret: "acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-mongodb-app-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# Prerequisites: -# 1. 
Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. diff --git a/environments/hardik-argo/global.yaml b/environments/hardik-argo/global.yaml deleted file mode 100644 index d113c12..0000000 --- a/environments/hardik-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: "gcp-secrets" - kind: ClusterSecretStore - remoteRef: - key: "customers-gcr-argo-gar-dockerconfig" - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: hardik-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/hardik-argo/image-pull-secrets.example.yaml b/environments/hardik-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/hardik-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. 
-# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). -# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/hardik-argo/kafka.yaml b/environments/hardik-argo/kafka.yaml deleted file mode 100644 index ff6fe5e..0000000 --- a/environments/hardik-argo/kafka.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/hardik-argo/migration.yaml b/environments/hardik-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/hardik-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. diff --git a/environments/hardik-argo/mongodb.yaml b/environments/hardik-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/hardik-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. 
-# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/hardik-argo/observability.yaml b/environments/hardik-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/hardik-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/hardik-argo/secrets.example.yaml b/environments/hardik-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/hardik-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). 
-# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: values -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/hardik-argo/secrets.sops.example.yaml b/environments/hardik-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/hardik-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with 
SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 7b388170b78a12903e6fec2a88522dd5088c7abc Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 15:05:07 +0530 Subject: [PATCH 71/79] Add final-argo customer --- argocd/customers/final-argo.yaml | 16 ++++ environments/final-argo/README.md | 77 +++++++++++++++++ environments/final-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 +++++++ environments/final-argo/countly-tls.env | 7 ++ environments/final-argo/countly.yaml | 2 + .../final-argo/credentials-clickhouse.yaml | 10 +++ .../final-argo/credentials-countly.yaml | 22 +++++ .../final-argo/credentials-kafka.yaml | 11 +++ .../final-argo/credentials-migration.yaml | 2 + .../final-argo/credentials-mongodb.yaml | 21 +++++ .../final-argo/credentials-observability.yaml | 2 + .../final-argo/external-secrets.example.yaml | 84 +++++++++++++++++++ environments/final-argo/global.yaml | 39 +++++++++ .../image-pull-secrets.example.yaml | 41 +++++++++ environments/final-argo/kafka.yaml | 2 + environments/final-argo/migration.yaml | 1 + environments/final-argo/mongodb.yaml | 2 + environments/final-argo/observability.yaml | 1 + environments/final-argo/secrets.example.yaml | 62 ++++++++++++++ .../final-argo/secrets.sops.example.yaml | 21 +++++ 21 files changed, 456 insertions(+) create mode 100644 argocd/customers/final-argo.yaml create 
mode 100644 environments/final-argo/README.md create mode 100644 environments/final-argo/clickhouse.yaml create mode 100644 environments/final-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/final-argo/countly-tls.env create mode 100644 environments/final-argo/countly.yaml create mode 100644 environments/final-argo/credentials-clickhouse.yaml create mode 100644 environments/final-argo/credentials-countly.yaml create mode 100644 environments/final-argo/credentials-kafka.yaml create mode 100644 environments/final-argo/credentials-migration.yaml create mode 100644 environments/final-argo/credentials-mongodb.yaml create mode 100644 environments/final-argo/credentials-observability.yaml create mode 100644 environments/final-argo/external-secrets.example.yaml create mode 100644 environments/final-argo/global.yaml create mode 100644 environments/final-argo/image-pull-secrets.example.yaml create mode 100644 environments/final-argo/kafka.yaml create mode 100644 environments/final-argo/migration.yaml create mode 100644 environments/final-argo/mongodb.yaml create mode 100644 environments/final-argo/observability.yaml create mode 100644 environments/final-argo/secrets.example.yaml create mode 100644 environments/final-argo/secrets.sops.example.yaml diff --git a/argocd/customers/final-argo.yaml b/argocd/customers/final-argo.yaml new file mode 100644 index 0000000..83f988d --- /dev/null +++ b/argocd/customers/final-argo.yaml @@ -0,0 +1,16 @@ +customer: final-argo +environment: final-argo +project: countly-customers +server: https://34.132.2.255 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: final-argo +clusterLocation: us-central1-a +hostname: final-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/final-argo/README.md 
b/environments/final-argo/README.md new file mode 100644 index 0000000..65a5fb3 --- /dev/null +++ b/environments/final-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. Fill in required secrets in the chart-specific files: + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. 
+ +For production, choose one of: +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) +- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | 
+| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/final-argo/clickhouse.yaml b/environments/final-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/final-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/final-argo/cluster-secret-store.gcp.example.yaml b/environments/final-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/final-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. 
+# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/final-argo/countly-tls.env b/environments/final-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/final-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline at end of file diff --git a/environments/final-argo/countly.yaml b/environments/final-argo/countly.yaml new file mode 100644 index 0000000..b71d75e --- /dev/null +++ b/environments/final-argo/countly.yaml @@ -0,0 +1,2 @@ +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. diff --git a/environments/final-argo/credentials-clickhouse.yaml b/environments/final-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..60be2cf --- /dev/null +++ b/environments/final-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +# ClickHouse secrets for final-argo. 
+secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: final-argo-clickhouse-default-user-password diff --git a/environments/final-argo/credentials-countly.yaml b/environments/final-argo/credentials-countly.yaml new file mode 100644 index 0000000..bd3b7e8 --- /dev/null +++ b/environments/final-argo/credentials-countly.yaml @@ -0,0 +1,22 @@ +# Countly secrets for final-argo. +secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: final-argo-countly-encryption-reports-key + webSessionSecret: final-argo-countly-web-session-secret + passwordSecret: final-argo-countly-password-secret + clickhouse: + password: final-argo-countly-clickhouse-password + mongodb: + password: final-argo-mongodb-app-password diff --git a/environments/final-argo/credentials-kafka.yaml b/environments/final-argo/credentials-kafka.yaml new file mode 100644 index 0000000..6f2ad31 --- /dev/null +++ b/environments/final-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +# Kafka secrets for final-argo. +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: final-argo-kafka-connect-clickhouse-password diff --git a/environments/final-argo/credentials-migration.yaml b/environments/final-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/final-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/final-argo/credentials-mongodb.yaml b/environments/final-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..4c0939f --- /dev/null +++ b/environments/final-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +# MongoDB secrets for final-argo. +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: final-argo-mongodb-admin-password + app: + password: final-argo-mongodb-app-password + metrics: + password: final-argo-mongodb-metrics-password + +users: + admin: + enabled: true + metrics: + enabled: true diff --git a/environments/final-argo/credentials-observability.yaml b/environments/final-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/final-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/final-argo/external-secrets.example.yaml b/environments/final-argo/external-secrets.example.yaml new file mode 100644 index 0000000..ca3b53a --- /dev/null +++ b/environments/final-argo/external-secrets.example.yaml @@ -0,0 +1,84 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific secrets files under environments//: +# +# environments//credentials-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: 
"acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-mongodb-app-password" +# +# environments//credentials-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//credentials-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//credentials-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. 
diff --git a/environments/final-argo/global.yaml b/environments/final-argo/global.yaml new file mode 100644 index 0000000..09b4d18 --- /dev/null +++ b/environments/final-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: "gcp-secrets" + kind: ClusterSecretStore + remoteRef: + key: "customers-gcr-argo-gar-dockerconfig" + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: final-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/final-argo/image-pull-secrets.example.yaml b/environments/final-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/final-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). 
+# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/final-argo/kafka.yaml b/environments/final-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/final-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/final-argo/migration.yaml b/environments/final-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/final-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/final-argo/mongodb.yaml b/environments/final-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/final-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/final-argo/observability.yaml b/environments/final-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/final-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. diff --git a/environments/final-argo/secrets.example.yaml b/environments/final-argo/secrets.example.yaml new file mode 100644 index 0000000..181cb54 --- /dev/null +++ b/environments/final-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). +# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//credentials-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- +secrets: + mode: values 
+auth: + defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments/<env>/credentials-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/final-argo/secrets.sops.example.yaml b/environments/final-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..7335b8d --- /dev/null +++ b/environments/final-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments/<env>/credentials-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments/<env>/credentials-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# =============================================================================  +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault).
+# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From a07c1da907ec33dde9bf8ea795bd0ec451cc0030 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 16:23:46 +0530 Subject: [PATCH 72/79] Simplify customer scaffold secret modes --- README.md | 8 +- argocd/ONBOARDING.md | 12 +- argocd/README.md | 8 +- scripts/new-argocd-customer.sh | 225 +++++++++++++++++++++++++++++++-- 4 files changed, 240 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 6316684..9d6e893 100644 --- a/README.md +++ b/README.md @@ -198,13 +198,19 @@ Recommended Secret Manager naming convention: For Argo CD managed deployments, scaffold a new customer/cluster with: ```bash -./scripts/new-argocd-customer.sh <customer> <server> <hostname> +./scripts/new-argocd-customer.sh [--secret-mode values|gcp-secrets] <customer> <server> <hostname> ``` This creates: - `environments/<customer>/` - `argocd/customers/<customer>.yaml` +For Secret Manager from day one, prefer: + +```bash +./scripts/new-argocd-customer.sh --secret-mode gcp-secrets <customer> <server> <hostname> +``` + Then: 1. fill in `environments/<customer>/credentials-*.yaml` 2.
commit diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index 3009358..bf27aff 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -103,7 +103,7 @@ kubectl config current-context Run: ```bash -./scripts/new-argocd-customer.sh <customer> <server> <hostname> +./scripts/new-argocd-customer.sh [--secret-mode values|gcp-secrets] <customer> <server> <hostname> ``` Example: @@ -112,10 +112,20 @@ Example: ./scripts/new-argocd-customer.sh northstar https://1.2.3.4 analytics.northstar.example.com ``` +If you plan to use Google Secret Manager from the start, use: + +```bash +./scripts/new-argocd-customer.sh --secret-mode gcp-secrets northstar https://1.2.3.4 analytics.northstar.example.com +``` + This creates: - `argocd/customers/northstar.yaml` - `environments/northstar/` +The difference is: +- `values` writes the credential files in direct-password mode +- `gcp-secrets` writes the credential files already wired for External Secrets and the standard Google Secret Manager key names + ## How To Read Argo CD For One Customer Yes, in the current setup all customer apps appear in the same Argo CD dashboard view.
diff --git a/argocd/README.md b/argocd/README.md index a1831fd..af3b173 100644 --- a/argocd/README.md +++ b/argocd/README.md @@ -73,7 +73,7 @@ argocd cluster list Run: ```bash -./scripts/new-argocd-customer.sh <customer> <server> <hostname> +./scripts/new-argocd-customer.sh [--secret-mode values|gcp-secrets] <customer> <server> <hostname> ``` Example: @@ -82,6 +82,12 @@ Example: ./scripts/new-argocd-customer.sh acme https://1.2.3.4 acme.count.ly ``` +If you know the customer will use Google Secret Manager, start with: + +```bash +./scripts/new-argocd-customer.sh --secret-mode gcp-secrets acme https://1.2.3.4 acme.count.ly +``` + This creates: - `argocd/customers/<customer>.yaml` diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index 7b58a94..208c425 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -5,18 +5,21 @@ set -euo pipefail usage() { cat <<'EOF' Usage: - scripts/new-argocd-customer.sh <customer> <server> <hostname> [project] + scripts/new-argocd-customer.sh [--secret-mode values|gcp-secrets] <customer> <server> <hostname> [project] Example: scripts/new-argocd-customer.sh acme https://1.2.3.4 acme.count.ly + scripts/new-argocd-customer.sh --secret-mode gcp-secrets acme https://1.2.3.4 acme.count.ly This command: 1. copies environments/reference to environments/<customer> 2. updates environments/<customer>/global.yaml with the hostname and default profiles - 3. creates argocd/customers/<customer>.yaml for the ApplicationSets + 3. writes credentials files for either direct values or GCP Secret Manager + 4.
creates argocd/customers/.yaml for the ApplicationSets Defaults: project countly-customers + secretMode values sizing production security open tls letsencrypt @@ -27,15 +30,64 @@ Defaults: EOF } -if [[ $# -lt 3 || $# -gt 4 ]]; then +secret_mode="values" +positionals=() + +while [[ $# -gt 0 ]]; do + case "$1" in + --secret-mode) + if [[ $# -lt 2 ]]; then + echo "Missing value for --secret-mode" >&2 + exit 1 + fi + secret_mode="$2" + shift 2 + ;; + -h|--help) + usage + exit 0 + ;; + --) + shift + while [[ $# -gt 0 ]]; do + positionals+=("$1") + shift + done + ;; + -*) + echo "Unknown option: $1" >&2 + usage + exit 1 + ;; + *) + positionals+=("$1") + shift + ;; + esac +done + +case "${secret_mode}" in + values|direct) + secret_mode="values" + ;; + gcp-secrets) + ;; + *) + echo "Unsupported --secret-mode: ${secret_mode}" >&2 + echo "Supported values: values, gcp-secrets" >&2 + exit 1 + ;; +esac + +if [[ ${#positionals[@]} -lt 3 || ${#positionals[@]} -gt 4 ]]; then usage exit 1 fi -customer="$1" -server="$2" -hostname="$3" -project="${4:-countly-customers}" +customer="${positionals[0]}" +server="${positionals[1]}" +hostname="${positionals[2]}" +project="${positionals[3]:-countly-customers}" repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" env_dir="${repo_root}/environments/${customer}" @@ -124,6 +176,158 @@ cat > "${env_dir}/migration.yaml" <<'EOF' # Customer-specific migration overrides only. 
EOF +if [[ "${secret_mode}" == "gcp-secrets" ]]; then + cat > "${env_dir}/credentials-countly.yaml" < "${env_dir}/credentials-kafka.yaml" < "${env_dir}/credentials-clickhouse.yaml" < "${env_dir}/credentials-mongodb.yaml" < "${env_dir}/credentials-countly.yaml" <<'EOF' +# Countly secrets — FILL IN before first deploy +# Passwords must match across charts (see secrets.example.yaml) +secrets: + mode: values + common: + encryptionReportsKey: "" # REQUIRED: min 8 chars + webSessionSecret: "" # REQUIRED: min 8 chars + passwordSecret: "" # REQUIRED: min 8 chars + clickhouse: + username: "default" + password: "" # REQUIRED: must match credentials-clickhouse.yaml + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + mongodb: + password: "" # REQUIRED: must match credentials-mongodb.yaml users.app.password +EOF + + cat > "${env_dir}/credentials-kafka.yaml" <<'EOF' +# Kafka secrets — FILL IN before first deploy +secrets: + mode: values + +kafkaConnect: + clickhouse: + password: "" # REQUIRED: must match ClickHouse default user password +EOF + + cat > "${env_dir}/credentials-clickhouse.yaml" <<'EOF' +# ClickHouse secrets — FILL IN before first deploy +secrets: + mode: values + +auth: + defaultUserPassword: + password: "" # REQUIRED: must match credentials-countly.yaml secrets.clickhouse.password +EOF + + cat > "${env_dir}/credentials-mongodb.yaml" <<'EOF' +# MongoDB secrets — FILL IN before first deploy +secrets: + mode: values + +users: + admin: + enabled: true + password: "" # REQUIRED: MongoDB super admin/root-style user + app: + password: "" # REQUIRED: must match credentials-countly.yaml secrets.mongodb.password + metrics: + enabled: true + password: "" # REQUIRED: metrics exporter password +EOF +fi + cat > "${customer_file}" <- convention + 4. If using GCP Secret Manager, create secrets using the ${customer}-- convention 5. 
Commit and sync countly-bootstrap EOF From 965c9147de3f41eb8e6e2a7d3039c88e7e09ed10 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 16:56:47 +0530 Subject: [PATCH 73/79] Add prod-argo customer --- argocd/customers/prod-argo.yaml | 16 ++++ environments/prod-argo/README.md | 77 +++++++++++++++++ environments/prod-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 +++++++ environments/prod-argo/countly-tls.env | 7 ++ environments/prod-argo/countly.yaml | 2 + .../prod-argo/credentials-clickhouse.yaml | 10 +++ .../prod-argo/credentials-countly.yaml | 22 +++++ environments/prod-argo/credentials-kafka.yaml | 11 +++ .../prod-argo/credentials-migration.yaml | 2 + .../prod-argo/credentials-mongodb.yaml | 21 +++++ .../prod-argo/credentials-observability.yaml | 2 + .../prod-argo/external-secrets.example.yaml | 84 +++++++++++++++++++ environments/prod-argo/global.yaml | 39 +++++++++ .../prod-argo/image-pull-secrets.example.yaml | 41 +++++++++ environments/prod-argo/kafka.yaml | 2 + environments/prod-argo/migration.yaml | 1 + environments/prod-argo/mongodb.yaml | 2 + environments/prod-argo/observability.yaml | 1 + environments/prod-argo/secrets.example.yaml | 62 ++++++++++++++ .../prod-argo/secrets.sops.example.yaml | 21 +++++ scripts/new-argocd-customer.sh | 19 ----- 22 files changed, 456 insertions(+), 19 deletions(-) create mode 100644 argocd/customers/prod-argo.yaml create mode 100644 environments/prod-argo/README.md create mode 100644 environments/prod-argo/clickhouse.yaml create mode 100644 environments/prod-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/prod-argo/countly-tls.env create mode 100644 environments/prod-argo/countly.yaml create mode 100644 environments/prod-argo/credentials-clickhouse.yaml create mode 100644 environments/prod-argo/credentials-countly.yaml create mode 100644 environments/prod-argo/credentials-kafka.yaml create mode 100644 environments/prod-argo/credentials-migration.yaml create 
mode 100644 environments/prod-argo/credentials-mongodb.yaml create mode 100644 environments/prod-argo/credentials-observability.yaml create mode 100644 environments/prod-argo/external-secrets.example.yaml create mode 100644 environments/prod-argo/global.yaml create mode 100644 environments/prod-argo/image-pull-secrets.example.yaml create mode 100644 environments/prod-argo/kafka.yaml create mode 100644 environments/prod-argo/migration.yaml create mode 100644 environments/prod-argo/mongodb.yaml create mode 100644 environments/prod-argo/observability.yaml create mode 100644 environments/prod-argo/secrets.example.yaml create mode 100644 environments/prod-argo/secrets.sops.example.yaml diff --git a/argocd/customers/prod-argo.yaml b/argocd/customers/prod-argo.yaml new file mode 100644 index 0000000..15b6770 --- /dev/null +++ b/argocd/customers/prod-argo.yaml @@ -0,0 +1,16 @@ +customer: prod-argo +environment: prod-argo +project: countly-customers +server: https://34.60.53.179 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: prod-argo +clusterLocation: us-central1-a +hostname: prod-argo.count.ly +sizing: tier1 +security: open +tls: letsencrypt +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/prod-argo/README.md b/environments/prod-argo/README.md new file mode 100644 index 0000000..65a5fb3 --- /dev/null +++ b/environments/prod-argo/README.md @@ -0,0 +1,77 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. 
Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + +3. Fill in required secrets in the chart-specific files: + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. + +For production, choose one of: +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
+- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. + +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/prod-argo/clickhouse.yaml b/environments/prod-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/prod-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/prod-argo/cluster-secret-store.gcp.example.yaml b/environments/prod-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/prod-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. 
+# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/prod-argo/countly-tls.env b/environments/prod-argo/countly-tls.env new file mode 100644 index 0000000..dd467a5 --- /dev/null +++ b/environments/prod-argo/countly-tls.env @@ -0,0 +1,7 @@ +# Countly TLS Certificate Configuration - Template +# Copy this file to countly-tls.env and update with real values + +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= \ No newline at end of file diff --git a/environments/prod-argo/countly.yaml b/environments/prod-argo/countly.yaml new file mode 100644 index 0000000..b71d75e --- /dev/null +++ b/environments/prod-argo/countly.yaml @@ -0,0 +1,2 @@ +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. diff --git a/environments/prod-argo/credentials-clickhouse.yaml b/environments/prod-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..5ac7a72 --- /dev/null +++ b/environments/prod-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +# ClickHouse secrets sourced from Google Secret Manager through External Secrets. 
+secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: "prod-argo-clickhouse-default-user-password" diff --git a/environments/prod-argo/credentials-countly.yaml b/environments/prod-argo/credentials-countly.yaml new file mode 100644 index 0000000..c0acb16 --- /dev/null +++ b/environments/prod-argo/credentials-countly.yaml @@ -0,0 +1,22 @@ +# Countly secrets sourced from Google Secret Manager through External Secrets. +secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: "prod-argo-countly-encryption-reports-key" + webSessionSecret: "prod-argo-countly-web-session-secret" + passwordSecret: "prod-argo-countly-password-secret" + clickhouse: + password: "prod-argo-countly-clickhouse-password" + mongodb: + password: "prod-argo-mongodb-app-password" diff --git a/environments/prod-argo/credentials-kafka.yaml b/environments/prod-argo/credentials-kafka.yaml new file mode 100644 index 0000000..3617568 --- /dev/null +++ b/environments/prod-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +# Kafka secrets sourced from Google Secret Manager through External Secrets. +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: "prod-argo-kafka-connect-clickhouse-password" diff --git a/environments/prod-argo/credentials-migration.yaml b/environments/prod-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/prod-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/prod-argo/credentials-mongodb.yaml b/environments/prod-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..15629d9 --- /dev/null +++ b/environments/prod-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +# MongoDB secrets sourced from Google Secret Manager through External Secrets. +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: "prod-argo-mongodb-admin-password" + app: + password: "prod-argo-mongodb-app-password" + metrics: + password: "prod-argo-mongodb-metrics-password" + +users: + admin: + enabled: true + metrics: + enabled: true diff --git a/environments/prod-argo/credentials-observability.yaml b/environments/prod-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/prod-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/prod-argo/external-secrets.example.yaml b/environments/prod-argo/external-secrets.example.yaml new file mode 100644 index 0000000..ca3b53a --- /dev/null +++ b/environments/prod-argo/external-secrets.example.yaml @@ -0,0 +1,84 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific secrets files under environments//: +# +# environments//credentials-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# 
webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-mongodb-app-password" +# +# environments//credentials-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//credentials-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//credentials-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. 
diff --git a/environments/prod-argo/global.yaml b/environments/prod-argo/global.yaml new file mode 100644 index 0000000..1ad62c5 --- /dev/null +++ b/environments/prod-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: letsencrypt + security: open + + imageRegistry: "" + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: "gcp-secrets" + kind: ClusterSecretStore + remoteRef: + key: "customers-gcr-argo-gar-dockerconfig" + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: prod-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/prod-argo/image-pull-secrets.example.yaml b/environments/prod-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..f1f537f --- /dev/null +++ b/environments/prod-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). 
+# +# Replace: +# - metadata.name with your actual secret name if not using "countly-gar" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-gar + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/prod-argo/kafka.yaml b/environments/prod-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/prod-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/prod-argo/migration.yaml b/environments/prod-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/prod-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/prod-argo/mongodb.yaml b/environments/prod-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/prod-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/prod-argo/observability.yaml b/environments/prod-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/prod-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. diff --git a/environments/prod-argo/secrets.example.yaml b/environments/prod-argo/secrets.example.yaml new file mode 100644 index 0000000..181cb54 --- /dev/null +++ b/environments/prod-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). +# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//credentials-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- +secrets: + mode: values +auth: 
+ defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//credentials-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/prod-argo/secrets.sops.example.yaml b/environments/prod-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..7335b8d --- /dev/null +++ b/environments/prod-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//credentials-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//credentials-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). +# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index 208c425..b894d8f 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -181,18 +181,11 @@ if [[ "${secret_mode}" == "gcp-secrets" ]]; then # Countly secrets sourced from Google Secret Manager through External Secrets. 
secrets: mode: externalSecret - common: - encryptionReportsKey: "" - webSessionSecret: "" - passwordSecret: "" clickhouse: username: "default" - password: "" database: "countly_drill" kafka: securityProtocol: "PLAINTEXT" - mongodb: - password: "" externalSecret: refreshInterval: "1h" secretStoreRef: @@ -221,10 +214,6 @@ secrets: remoteRefs: clickhouse: password: "${customer}-kafka-connect-clickhouse-password" - -kafkaConnect: - clickhouse: - password: "" EOF cat > "${env_dir}/credentials-clickhouse.yaml" < "${env_dir}/credentials-mongodb.yaml" < "${env_dir}/credentials-countly.yaml" <<'EOF' From 47bbb2b462ddda7e57e83048a50cac9cdc116073 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 17:33:03 +0530 Subject: [PATCH 74/79] Add GSM TLS support and remove test customers --- README.md | 1 + argocd/ONBOARDING.md | 28 +++++++ argocd/customers/final-argo.yaml | 16 ---- argocd/customers/prod-argo.yaml | 16 ---- .../06-letsencrypt-prod-issuer-app.yaml | 2 +- .../external-secret-ingress-tls.yaml | 30 +++++++ charts/countly/values.schema.json | 33 ++++++++ charts/countly/values.yaml | 9 ++ docs/DEPLOYMENT-MODES.md | 19 ++++- environments/final-argo/README.md | 77 ----------------- environments/final-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/final-argo/countly-tls.env | 7 -- environments/final-argo/countly.yaml | 2 - .../final-argo/credentials-clickhouse.yaml | 10 --- .../final-argo/credentials-countly.yaml | 22 ----- .../final-argo/credentials-kafka.yaml | 11 --- .../final-argo/credentials-migration.yaml | 2 - .../final-argo/credentials-mongodb.yaml | 21 ----- .../final-argo/credentials-observability.yaml | 2 - .../final-argo/external-secrets.example.yaml | 84 ------------------- environments/final-argo/global.yaml | 39 --------- .../image-pull-secrets.example.yaml | 41 --------- environments/final-argo/kafka.yaml | 2 - environments/final-argo/migration.yaml | 1 - environments/final-argo/mongodb.yaml | 2 - 
environments/final-argo/observability.yaml | 1 - environments/final-argo/secrets.example.yaml | 62 -------------- .../final-argo/secrets.sops.example.yaml | 21 ----- environments/prod-argo/README.md | 77 ----------------- environments/prod-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------- environments/prod-argo/countly-tls.env | 7 -- environments/prod-argo/countly.yaml | 2 - .../prod-argo/credentials-clickhouse.yaml | 10 --- .../prod-argo/credentials-countly.yaml | 22 ----- environments/prod-argo/credentials-kafka.yaml | 11 --- .../prod-argo/credentials-migration.yaml | 2 - .../prod-argo/credentials-mongodb.yaml | 21 ----- .../prod-argo/credentials-observability.yaml | 2 - .../prod-argo/external-secrets.example.yaml | 84 ------------------- environments/prod-argo/global.yaml | 39 --------- .../prod-argo/image-pull-secrets.example.yaml | 41 --------- environments/prod-argo/kafka.yaml | 2 - environments/prod-argo/migration.yaml | 1 - environments/prod-argo/mongodb.yaml | 2 - environments/prod-argo/observability.yaml | 1 - environments/prod-argo/secrets.example.yaml | 62 -------------- .../prod-argo/secrets.sops.example.yaml | 21 ----- environments/reference/countly.yaml | 9 ++ scripts/new-argocd-customer.sh | 28 +++++-- 51 files changed, 152 insertions(+), 919 deletions(-) delete mode 100644 argocd/customers/final-argo.yaml delete mode 100644 argocd/customers/prod-argo.yaml create mode 100644 charts/countly/templates/external-secret-ingress-tls.yaml delete mode 100644 environments/final-argo/README.md delete mode 100644 environments/final-argo/clickhouse.yaml delete mode 100644 environments/final-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/final-argo/countly-tls.env delete mode 100644 environments/final-argo/countly.yaml delete mode 100644 environments/final-argo/credentials-clickhouse.yaml delete mode 100644 environments/final-argo/credentials-countly.yaml delete mode 100644 
environments/final-argo/credentials-kafka.yaml delete mode 100644 environments/final-argo/credentials-migration.yaml delete mode 100644 environments/final-argo/credentials-mongodb.yaml delete mode 100644 environments/final-argo/credentials-observability.yaml delete mode 100644 environments/final-argo/external-secrets.example.yaml delete mode 100644 environments/final-argo/global.yaml delete mode 100644 environments/final-argo/image-pull-secrets.example.yaml delete mode 100644 environments/final-argo/kafka.yaml delete mode 100644 environments/final-argo/migration.yaml delete mode 100644 environments/final-argo/mongodb.yaml delete mode 100644 environments/final-argo/observability.yaml delete mode 100644 environments/final-argo/secrets.example.yaml delete mode 100644 environments/final-argo/secrets.sops.example.yaml delete mode 100644 environments/prod-argo/README.md delete mode 100644 environments/prod-argo/clickhouse.yaml delete mode 100644 environments/prod-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/prod-argo/countly-tls.env delete mode 100644 environments/prod-argo/countly.yaml delete mode 100644 environments/prod-argo/credentials-clickhouse.yaml delete mode 100644 environments/prod-argo/credentials-countly.yaml delete mode 100644 environments/prod-argo/credentials-kafka.yaml delete mode 100644 environments/prod-argo/credentials-migration.yaml delete mode 100644 environments/prod-argo/credentials-mongodb.yaml delete mode 100644 environments/prod-argo/credentials-observability.yaml delete mode 100644 environments/prod-argo/external-secrets.example.yaml delete mode 100644 environments/prod-argo/global.yaml delete mode 100644 environments/prod-argo/image-pull-secrets.example.yaml delete mode 100644 environments/prod-argo/kafka.yaml delete mode 100644 environments/prod-argo/migration.yaml delete mode 100644 environments/prod-argo/mongodb.yaml delete mode 100644 environments/prod-argo/observability.yaml delete mode 100644 
environments/prod-argo/secrets.example.yaml delete mode 100644 environments/prod-argo/secrets.sops.example.yaml diff --git a/README.md b/README.md index 9d6e893..82516ee 100644 --- a/README.md +++ b/README.md @@ -179,6 +179,7 @@ For a GAR-backed production example, see [environments/example-production/global For GitOps-managed pull secrets, start from [environments/reference/image-pull-secrets.example.yaml](/Users/admin/cly/helm/environments/reference/image-pull-secrets.example.yaml) and encrypt or template it before committing. For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly can create its namespaced `dockerconfigjson` pull secret. Application secrets can use the same pattern in `credentials-countly.yaml`, `credentials-kafka.yaml`, `credentials-clickhouse.yaml`, and `credentials-mongodb.yaml` by switching `secrets.mode` to `externalSecret` and filling `secrets.externalSecret.remoteRefs`. +Countly ingress TLS can also use the same pattern: set customer `tls: provided`, then enable `ingress.tls.externalSecret` in `countly.yaml` to materialize a `kubernetes.io/tls` secret from Secret Manager keys such as `countly-prod-tls-crt` and `countly-prod-tls-key`. Recommended Secret Manager naming convention: - `-gar-dockerconfig` diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index bf27aff..aae0f9c 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -346,6 +346,34 @@ global: This GAR pull-secret path is for Countly application images. Kafka Connect uses the public `countly/strimzi-kafka-connect-clickhouse` image by default. +### Provided TLS + Secret Manager + +If you want to use your own certificate instead of Let's Encrypt: + +1. set customer `tls: provided` +2. keep or enable the generated `countly.yaml` TLS External Secret block +3. 
create these Secret Manager keys once if you want to reuse the same cert for many customers: + - `countly-prod-tls-crt` + - `countly-prod-tls-key` + +Example: + +```yaml +ingress: + tls: + externalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + tlsCrt: countly-prod-tls-crt + tlsKey: countly-prod-tls-key +``` + +This creates the Countly ingress TLS secret automatically in the `countly` namespace, so you do not need a separate manual TLS manifest per customer. + ## Step 5: If Using Secret Manager, Prepare The Cluster This is the production path. diff --git a/argocd/customers/final-argo.yaml b/argocd/customers/final-argo.yaml deleted file mode 100644 index 83f988d..0000000 --- a/argocd/customers/final-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: final-argo -environment: final-argo -project: countly-customers -server: https://34.132.2.255 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: countly-dev-313620 -clusterName: final-argo -clusterLocation: us-central1-a -hostname: final-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/argocd/customers/prod-argo.yaml b/argocd/customers/prod-argo.yaml deleted file mode 100644 index 15b6770..0000000 --- a/argocd/customers/prod-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: prod-argo -environment: prod-argo -project: countly-customers -server: https://34.60.53.179 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: countly-dev-313620 -clusterName: prod-argo -clusterLocation: us-central1-a -hostname: prod-argo.count.ly -sizing: tier1 -security: open -tls: letsencrypt -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git 
a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml index 19eb7f0..161d16b 100644 --- a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml +++ b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml @@ -23,7 +23,7 @@ spec: source: repoURL: https://github.com/Countly/helm.git targetRevision: gcp-artifact-rep-integration - path: argocd/operator-manifests/letsencrypt-prod-issuer + path: '{{ if eq .tls "letsencrypt" }}argocd/operator-manifests/letsencrypt-prod-issuer{{ else }}charts/noop{{ end }}' directory: recurse: true destination: diff --git a/charts/countly/templates/external-secret-ingress-tls.yaml b/charts/countly/templates/external-secret-ingress-tls.yaml new file mode 100644 index 0000000..3fb8d9f --- /dev/null +++ b/charts/countly/templates/external-secret-ingress-tls.yaml @@ -0,0 +1,30 @@ +{{- $tlsMode := include "countly.tls.mode" . -}} +{{- if and (eq $tlsMode "existingSecret") .Values.ingress.tls.externalSecret.enabled }} +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: {{ include "countly.tls.secretName" . }} + labels: + {{- include "countly.labels" . | nindent 4 }} + {{- if .Values.argocd.enabled }} + annotations: + {{- include "countly.syncWave" (dict "wave" "1" "root" .) | nindent 4 }} + {{- end }} +spec: + refreshInterval: {{ .Values.ingress.tls.externalSecret.refreshInterval | default "1h" }} + secretStoreRef: + name: {{ required "ingress.tls.externalSecret.secretStoreRef.name is required when ingress.tls.externalSecret.enabled is true" .Values.ingress.tls.externalSecret.secretStoreRef.name }} + kind: {{ .Values.ingress.tls.externalSecret.secretStoreRef.kind | default "ClusterSecretStore" }} + target: + name: {{ include "countly.tls.secretName" . 
}} + creationPolicy: Owner + template: + type: kubernetes.io/tls + data: + - secretKey: tls.crt + remoteRef: + key: {{ required "ingress.tls.externalSecret.remoteRefs.tlsCrt is required when ingress.tls.externalSecret.enabled is true" .Values.ingress.tls.externalSecret.remoteRefs.tlsCrt }} + - secretKey: tls.key + remoteRef: + key: {{ required "ingress.tls.externalSecret.remoteRefs.tlsKey is required when ingress.tls.externalSecret.enabled is true" .Values.ingress.tls.externalSecret.remoteRefs.tlsKey }} +{{- end }} diff --git a/charts/countly/values.schema.json b/charts/countly/values.schema.json index 31a0813..35af06f 100644 --- a/charts/countly/values.schema.json +++ b/charts/countly/values.schema.json @@ -370,6 +370,39 @@ "secretName": { "type": "string" }, + "externalSecret": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "refreshInterval": { + "type": "string" + }, + "secretStoreRef": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "kind": { + "type": "string" + } + } + }, + "remoteRefs": { + "type": "object", + "properties": { + "tlsCrt": { + "type": "string" + }, + "tlsKey": { + "type": "string" + } + } + } + } + }, "selfSigned": { "type": "object", "properties": { diff --git a/charts/countly/values.yaml b/charts/countly/values.yaml index cf1081d..3664588 100644 --- a/charts/countly/values.yaml +++ b/charts/countly/values.yaml @@ -514,6 +514,15 @@ ingress: mode: http clusterIssuer: letsencrypt-prod secretName: "" # Auto-derived if empty: -tls + externalSecret: + enabled: false + refreshInterval: "1h" + secretStoreRef: + name: "" + kind: ClusterSecretStore + remoteRefs: + tlsCrt: "countly-prod-tls-crt" + tlsKey: "countly-prod-tls-key" selfSigned: issuerName: "" # Auto-derived if empty: -ca-issuer caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/docs/DEPLOYMENT-MODES.md b/docs/DEPLOYMENT-MODES.md index d6365b4..2be4c3a 100644 --- a/docs/DEPLOYMENT-MODES.md +++ 
b/docs/DEPLOYMENT-MODES.md @@ -8,7 +8,7 @@ Set in `global.yaml` -> `ingress.tls.mode`: |------|-------------|-------------| | `http` | No TLS (default) | None | | `letsencrypt` | Auto-provisioned via cert-manager | cert-manager + ClusterIssuer, DNS pointing to ingress | -| `existingSecret` | Pre-created TLS secret | Kubernetes TLS secret in countly namespace | +| `existingSecret` | Pre-created TLS secret or ExternalSecret-created TLS secret | Kubernetes TLS secret in countly namespace | | `selfSigned` | Self-signed CA via cert-manager | cert-manager (dev/local only) | ### Let's Encrypt Example @@ -29,6 +29,23 @@ ingress: secretName: my-tls-cert # Must exist in countly namespace ``` +### Existing Certificate From Secret Manager Example +```yaml +ingress: + hostname: analytics.example.com + tls: + mode: existingSecret + secretName: my-tls-cert + externalSecret: + enabled: true + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + tlsCrt: countly-prod-tls-crt + tlsKey: countly-prod-tls-key +``` + ## Backing Service Modes Set in `global.yaml` -> `backingServices..mode`: diff --git a/environments/final-argo/README.md b/environments/final-argo/README.md deleted file mode 100644 index 65a5fb3..0000000 --- a/environments/final-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. 
Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
-- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/final-argo/clickhouse.yaml b/environments/final-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/final-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/final-argo/cluster-secret-store.gcp.example.yaml b/environments/final-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/final-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. 
-# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/final-argo/countly-tls.env b/environments/final-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/final-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/final-argo/countly.yaml b/environments/final-argo/countly.yaml deleted file mode 100644 index b71d75e..0000000 --- a/environments/final-argo/countly.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. diff --git a/environments/final-argo/credentials-clickhouse.yaml b/environments/final-argo/credentials-clickhouse.yaml deleted file mode 100644 index 60be2cf..0000000 --- a/environments/final-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets for final-argo. 
-secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: final-argo-clickhouse-default-user-password diff --git a/environments/final-argo/credentials-countly.yaml b/environments/final-argo/credentials-countly.yaml deleted file mode 100644 index bd3b7e8..0000000 --- a/environments/final-argo/credentials-countly.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Countly secrets for final-argo. -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: final-argo-countly-encryption-reports-key - webSessionSecret: final-argo-countly-web-session-secret - passwordSecret: final-argo-countly-password-secret - clickhouse: - password: final-argo-countly-clickhouse-password - mongodb: - password: final-argo-mongodb-app-password diff --git a/environments/final-argo/credentials-kafka.yaml b/environments/final-argo/credentials-kafka.yaml deleted file mode 100644 index 6f2ad31..0000000 --- a/environments/final-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# Kafka secrets for final-argo. -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: final-argo-kafka-connect-clickhouse-password diff --git a/environments/final-argo/credentials-migration.yaml b/environments/final-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/final-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/final-argo/credentials-mongodb.yaml b/environments/final-argo/credentials-mongodb.yaml deleted file mode 100644 index 4c0939f..0000000 --- a/environments/final-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets for final-argo. -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: final-argo-mongodb-admin-password - app: - password: final-argo-mongodb-app-password - metrics: - password: final-argo-mongodb-metrics-password - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/final-argo/credentials-observability.yaml b/environments/final-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/final-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/final-argo/external-secrets.example.yaml b/environments/final-argo/external-secrets.example.yaml deleted file mode 100644 index ca3b53a..0000000 --- a/environments/final-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# webSessionSecret: 
"acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-mongodb-app-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. 
diff --git a/environments/final-argo/global.yaml b/environments/final-argo/global.yaml deleted file mode 100644 index 09b4d18..0000000 --- a/environments/final-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: "gcp-secrets" - kind: ClusterSecretStore - remoteRef: - key: "customers-gcr-argo-gar-dockerconfig" - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: final-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/final-argo/image-pull-secrets.example.yaml b/environments/final-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/final-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). 
-# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/final-argo/kafka.yaml b/environments/final-argo/kafka.yaml deleted file mode 100644 index ff6fe5e..0000000 --- a/environments/final-argo/kafka.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/final-argo/migration.yaml b/environments/final-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/final-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. diff --git a/environments/final-argo/mongodb.yaml b/environments/final-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/final-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/final-argo/observability.yaml b/environments/final-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/final-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/final-argo/secrets.example.yaml b/environments/final-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/final-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). -# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: 
values -auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/final-argo/secrets.sops.example.yaml b/environments/final-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/final-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). -# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/environments/prod-argo/README.md b/environments/prod-argo/README.md deleted file mode 100644 index 65a5fb3..0000000 --- a/environments/prod-argo/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. 
Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - -3. Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
-- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example 
`ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/prod-argo/clickhouse.yaml b/environments/prod-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/prod-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/prod-argo/cluster-secret-store.gcp.example.yaml b/environments/prod-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/prod-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. 
-# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/prod-argo/countly-tls.env b/environments/prod-argo/countly-tls.env deleted file mode 100644 index dd467a5..0000000 --- a/environments/prod-argo/countly-tls.env +++ /dev/null @@ -1,7 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file diff --git a/environments/prod-argo/countly.yaml b/environments/prod-argo/countly.yaml deleted file mode 100644 index b71d75e..0000000 --- a/environments/prod-argo/countly.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. diff --git a/environments/prod-argo/credentials-clickhouse.yaml b/environments/prod-argo/credentials-clickhouse.yaml deleted file mode 100644 index 5ac7a72..0000000 --- a/environments/prod-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets sourced from Google Secret Manager through External Secrets. 
-secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: "prod-argo-clickhouse-default-user-password" diff --git a/environments/prod-argo/credentials-countly.yaml b/environments/prod-argo/credentials-countly.yaml deleted file mode 100644 index c0acb16..0000000 --- a/environments/prod-argo/credentials-countly.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Countly secrets sourced from Google Secret Manager through External Secrets. -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: "prod-argo-countly-encryption-reports-key" - webSessionSecret: "prod-argo-countly-web-session-secret" - passwordSecret: "prod-argo-countly-password-secret" - clickhouse: - password: "prod-argo-countly-clickhouse-password" - mongodb: - password: "prod-argo-mongodb-app-password" diff --git a/environments/prod-argo/credentials-kafka.yaml b/environments/prod-argo/credentials-kafka.yaml deleted file mode 100644 index 3617568..0000000 --- a/environments/prod-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# Kafka secrets sourced from Google Secret Manager through External Secrets. -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: "prod-argo-kafka-connect-clickhouse-password" diff --git a/environments/prod-argo/credentials-migration.yaml b/environments/prod-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/prod-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/prod-argo/credentials-mongodb.yaml b/environments/prod-argo/credentials-mongodb.yaml deleted file mode 100644 index 15629d9..0000000 --- a/environments/prod-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets sourced from Google Secret Manager through External Secrets. -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: "prod-argo-mongodb-admin-password" - app: - password: "prod-argo-mongodb-app-password" - metrics: - password: "prod-argo-mongodb-metrics-password" - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/prod-argo/credentials-observability.yaml b/environments/prod-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/prod-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/prod-argo/external-secrets.example.yaml b/environments/prod-argo/external-secrets.example.yaml deleted file mode 100644 index ca3b53a..0000000 --- a/environments/prod-argo/external-secrets.example.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: 
"acme-countly-encryption-reports-key" -# webSessionSecret: "acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-mongodb-app-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. 
diff --git a/environments/prod-argo/global.yaml b/environments/prod-argo/global.yaml deleted file mode 100644 index 1ad62c5..0000000 --- a/environments/prod-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: letsencrypt - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: "gcp-secrets" - kind: ClusterSecretStore - remoteRef: - key: "customers-gcr-argo-gar-dockerconfig" - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: prod-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/prod-argo/image-pull-secrets.example.yaml b/environments/prod-argo/image-pull-secrets.example.yaml deleted file mode 100644 index f1f537f..0000000 --- a/environments/prod-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). 
-# -# Replace: -# - metadata.name with your actual secret name if not using "countly-gar" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-gar - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/prod-argo/kafka.yaml b/environments/prod-argo/kafka.yaml deleted file mode 100644 index ff6fe5e..0000000 --- a/environments/prod-argo/kafka.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/prod-argo/migration.yaml b/environments/prod-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/prod-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. diff --git a/environments/prod-argo/mongodb.yaml b/environments/prod-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/prod-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/prod-argo/observability.yaml b/environments/prod-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/prod-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/prod-argo/secrets.example.yaml b/environments/prod-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/prod-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). -# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: values 
-auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/prod-argo/secrets.sops.example.yaml b/environments/prod-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/prod-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
-# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx diff --git a/environments/reference/countly.yaml b/environments/reference/countly.yaml index f01bf74..3d9ea4d 100644 --- a/environments/reference/countly.yaml +++ b/environments/reference/countly.yaml @@ -566,6 +566,15 @@ ingress: mode: "" # Set via argocd/customers/.yaml clusterIssuer: letsencrypt-prod # Used with mode=letsencrypt secretName: "" # Auto-derived if empty: -tls + externalSecret: + enabled: false # Create the TLS Secret from External Secrets when mode=existingSecret + refreshInterval: "1h" + secretStoreRef: + name: "" # e.g. gcp-secrets + kind: ClusterSecretStore + remoteRefs: + tlsCrt: "countly-prod-tls-crt" + tlsKey: "countly-prod-tls-key" selfSigned: issuerName: "" # Auto-derived if empty: -ca-issuer caSecretName: "" # Auto-derived if empty: -ca-keypair diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index b894d8f..8240e00 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -148,11 +148,6 @@ backingServices: mode: bundled EOF -cat > "${env_dir}/countly.yaml" <<'EOF' -# Customer-specific Countly overrides only. -# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. -EOF - cat > "${env_dir}/kafka.yaml" <<'EOF' # Customer-specific Kafka overrides only. # Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. @@ -177,6 +172,24 @@ cat > "${env_dir}/migration.yaml" <<'EOF' EOF if [[ "${secret_mode}" == "gcp-secrets" ]]; then + cat > "${env_dir}/countly.yaml" <<'EOF' +# Customer-specific Countly overrides only. 
+# TLS Secret Manager support is prewired below and becomes active only when: +# - argocd/customers/.yaml sets tls: provided +# - the shared Secret Manager keys exist +ingress: + tls: + externalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + tlsCrt: countly-prod-tls-crt + tlsKey: countly-prod-tls-key +EOF + cat > "${env_dir}/credentials-countly.yaml" < "${env_dir}/countly.yaml" <<'EOF' +# Customer-specific Countly overrides only. +# Leave this file minimal so sizing / TLS / observability / security profiles apply cleanly. +EOF + cat > "${env_dir}/credentials-countly.yaml" <<'EOF' # Countly secrets — FILL IN before first deploy # Passwords must match across charts (see secrets.example.yaml) From 63db10765dd8523419a2075fdb8b1e47e7fee243 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 17:38:18 +0530 Subject: [PATCH 75/79] Refresh reference environment examples --- environments/example-production/global.yaml | 2 +- environments/reference/README.md | 3 +++ environments/reference/countly-tls.env | 6 ++++-- .../reference/external-secrets.example.yaml | 16 ++++++++++++++++ .../reference/image-pull-secrets.example.yaml | 6 +++--- 5 files changed, 27 insertions(+), 6 deletions(-) diff --git a/environments/example-production/global.yaml b/environments/example-production/global.yaml index 500e3e9..9ed5760 100644 --- a/environments/example-production/global.yaml +++ b/environments/example-production/global.yaml @@ -19,7 +19,7 @@ global: remoteRef: key: customer-a-gar-dockerconfig imagePullSecrets: - - name: countly-gar + - name: countly-registry storageClass: gp3 ingress: diff --git a/environments/reference/README.md b/environments/reference/README.md index 65a5fb3..c83d349 100644 --- a/environments/reference/README.md +++ b/environments/reference/README.md @@ -18,6 +18,7 @@ This directory is a complete starting point for a new Countly deployment. 
- Choose `global.security`: `open` or `hardened` - Choose backing service modes (bundled or external) - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + - For `global.tls: provided`, either point Countly at a pre-created TLS secret or enable `ingress.tls.externalSecret` in `countly.yaml` 3. Fill in required secrets in the chart-specific files: - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` @@ -54,6 +55,7 @@ For production, choose one of: For private registries such as GAR, also create namespaced image pull secrets. Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. +You can use the same External Secrets pattern for Countly ingress TLS when `global.tls` is `provided`; see `countly.yaml` and `external-secrets.example.yaml`. 
## Files @@ -70,6 +72,7 @@ If you use External Secrets Operator with Google Secret Manager, point `global.i | `credentials-clickhouse.yaml` | ClickHouse auth password | | `credentials-kafka.yaml` | Kafka Connect ClickHouse password | | `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `countly-tls.env` | Manual TLS secret helper for bring-your-own certificate workflows | | `secrets.example.yaml` | Combined secrets reference (all charts in one file) | | `secrets.sops.example.yaml` | SOPS encryption guide | | `external-secrets.example.yaml` | External Secrets Operator guide | diff --git a/environments/reference/countly-tls.env b/environments/reference/countly-tls.env index dd467a5..568b026 100644 --- a/environments/reference/countly-tls.env +++ b/environments/reference/countly-tls.env @@ -1,7 +1,9 @@ # Countly TLS Certificate Configuration - Template -# Copy this file to countly-tls.env and update with real values +# Use this only for manual bring-your-own TLS secret workflows. +# If you use Secret Manager + External Secrets, prefer ingress.tls.externalSecret +# in countly.yaml instead of managing this file. 
# Base64 encoded TLS certificate (full chain) TLS_CRT= # Base64 encoded TLS private key -TLS_KEY= \ No newline at end of file +TLS_KEY= diff --git a/environments/reference/external-secrets.example.yaml b/environments/reference/external-secrets.example.yaml index ca3b53a..092f526 100644 --- a/environments/reference/external-secrets.example.yaml +++ b/environments/reference/external-secrets.example.yaml @@ -75,6 +75,22 @@ # remoteRef: # key: "acme-gar-dockerconfig" # +# For bring-your-own Countly TLS from Secret Manager, configure this in +# environments//countly.yaml and set global.tls=provided: +# +# ingress: +# tls: +# secretName: "countly-tls" +# externalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# tlsCrt: "countly-prod-tls-crt" +# tlsKey: "countly-prod-tls-key" +# # Prerequisites: # 1. Install External Secrets Operator: https://external-secrets.io/ # 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend diff --git a/environments/reference/image-pull-secrets.example.yaml b/environments/reference/image-pull-secrets.example.yaml index f1f537f..b1af340 100644 --- a/environments/reference/image-pull-secrets.example.yaml +++ b/environments/reference/image-pull-secrets.example.yaml @@ -7,7 +7,7 @@ # such as GCP Artifact Registry (GAR). 
# # Replace: -# - metadata.name with your actual secret name if not using "countly-gar" +# - metadata.name with your actual secret name if not using "countly-registry" # - namespaces if your releases run elsewhere # - .dockerconfigjson with the base64-encoded contents of your Docker config # @@ -25,7 +25,7 @@ apiVersion: v1 kind: Secret metadata: - name: countly-gar + name: countly-registry namespace: countly type: kubernetes.io/dockerconfigjson data: @@ -34,7 +34,7 @@ data: apiVersion: v1 kind: Secret metadata: - name: countly-gar + name: countly-registry namespace: kafka type: kubernetes.io/dockerconfigjson data: From 186eb4483521e5b967d51f3e26ed68a3b8a83bca Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 18:07:55 +0530 Subject: [PATCH 76/79] Add tls-argo customer --- argocd/customers/tls-argo.yaml | 16 +++ environments/tls-argo/README.md | 80 ++++++++++++++ environments/tls-argo/clickhouse.yaml | 2 + .../cluster-secret-store.gcp.example.yaml | 31 ++++++ environments/tls-argo/countly-tls.env | 9 ++ environments/tls-argo/countly.yaml | 16 +++ .../tls-argo/credentials-clickhouse.yaml | 10 ++ .../tls-argo/credentials-countly.yaml | 22 ++++ environments/tls-argo/credentials-kafka.yaml | 11 ++ .../tls-argo/credentials-migration.yaml | 2 + .../tls-argo/credentials-mongodb.yaml | 21 ++++ .../tls-argo/credentials-observability.yaml | 2 + .../tls-argo/external-secrets.example.yaml | 100 ++++++++++++++++++ environments/tls-argo/global.yaml | 39 +++++++ .../tls-argo/image-pull-secrets.example.yaml | 41 +++++++ environments/tls-argo/kafka.yaml | 2 + environments/tls-argo/migration.yaml | 1 + environments/tls-argo/mongodb.yaml | 2 + environments/tls-argo/observability.yaml | 1 + environments/tls-argo/secrets.example.yaml | 62 +++++++++++ .../tls-argo/secrets.sops.example.yaml | 21 ++++ 21 files changed, 491 insertions(+) create mode 100644 argocd/customers/tls-argo.yaml create mode 100644 environments/tls-argo/README.md create mode 100644 
environments/tls-argo/clickhouse.yaml create mode 100644 environments/tls-argo/cluster-secret-store.gcp.example.yaml create mode 100644 environments/tls-argo/countly-tls.env create mode 100644 environments/tls-argo/countly.yaml create mode 100644 environments/tls-argo/credentials-clickhouse.yaml create mode 100644 environments/tls-argo/credentials-countly.yaml create mode 100644 environments/tls-argo/credentials-kafka.yaml create mode 100644 environments/tls-argo/credentials-migration.yaml create mode 100644 environments/tls-argo/credentials-mongodb.yaml create mode 100644 environments/tls-argo/credentials-observability.yaml create mode 100644 environments/tls-argo/external-secrets.example.yaml create mode 100644 environments/tls-argo/global.yaml create mode 100644 environments/tls-argo/image-pull-secrets.example.yaml create mode 100644 environments/tls-argo/kafka.yaml create mode 100644 environments/tls-argo/migration.yaml create mode 100644 environments/tls-argo/mongodb.yaml create mode 100644 environments/tls-argo/observability.yaml create mode 100644 environments/tls-argo/secrets.example.yaml create mode 100644 environments/tls-argo/secrets.sops.example.yaml diff --git a/argocd/customers/tls-argo.yaml b/argocd/customers/tls-argo.yaml new file mode 100644 index 0000000..8ff86b5 --- /dev/null +++ b/argocd/customers/tls-argo.yaml @@ -0,0 +1,16 @@ +customer: tls-argo +environment: tls-argo +project: countly-customers +server: https://34.70.85.2 +gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com +secretManagerProjectID: countly-tools +clusterProjectID: countly-dev-313620 +clusterName: tls-argo +clusterLocation: us-central1-a +hostname: tls-argo.count.ly +sizing: tier1 +security: open +tls: provided +observability: disabled +kafkaConnect: balanced +migration: disabled diff --git a/environments/tls-argo/README.md b/environments/tls-argo/README.md new file mode 100644 index 0000000..c83d349 --- /dev/null +++ b/environments/tls-argo/README.md @@ -0,0 
+1,80 @@ +# Reference Environment + +This directory is a complete starting point for a new Countly deployment. + +## Quick Start + +1. Copy this directory: + ```bash + cp -r environments/reference environments/my-deployment + ``` + +2. Edit `global.yaml`: + - Set `ingress.hostname` to your domain + - Choose `global.sizing`: `local`, `small`, or `production` + - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` + - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` + - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` + - Choose `global.security`: `open` or `hardened` + - Choose backing service modes (bundled or external) + - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` + - For `global.tls: provided`, either point Countly at a pre-created TLS secret or enable `ingress.tls.externalSecret` in `countly.yaml` + +3. Fill in required secrets in the chart-specific files: + - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` + - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` + - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` + - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` + - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` + + Or use `secrets.example.yaml` as a complete reference. + +4. Register your environment in `helmfile.yaml.gotmpl`: + ```yaml + environments: + my-deployment: + values: + - environments/my-deployment/global.yaml + ``` + +5. Deploy: + ```bash + helmfile -e my-deployment apply + ``` + +## Secret Management + +See `secrets.example.yaml` for a complete list of all required secrets. 
+ +For production, choose one of: +- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) +- **existingSecret**: Pre-create Kubernetes secrets and reference them +- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files +- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) + +For private registries such as GAR, also create namespaced image pull secrets. +Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. +If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. +You can use the same External Secrets pattern for Countly ingress TLS when `global.tls` is `provided`; see `countly.yaml` and `external-secrets.example.yaml`. 
+ +## Files + +| File | Purpose | +|------|---------| +| `global.yaml` | Profile selectors, ingress, backing service modes | +| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | +| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | +| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | +| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | +| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | +| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | +| `credentials-mongodb.yaml` | MongoDB user passwords | +| `credentials-clickhouse.yaml` | ClickHouse auth password | +| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | +| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | +| `countly-tls.env` | Manual TLS secret helper for bring-your-own certificate workflows | +| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | +| `secrets.sops.example.yaml` | SOPS encryption guide | +| `external-secrets.example.yaml` | External Secrets Operator guide | +| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | +| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/tls-argo/clickhouse.yaml b/environments/tls-argo/clickhouse.yaml new file mode 100644 index 0000000..17291a9 --- /dev/null +++ b/environments/tls-argo/clickhouse.yaml @@ -0,0 +1,2 @@ +# Customer-specific ClickHouse overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/tls-argo/cluster-secret-store.gcp.example.yaml b/environments/tls-argo/cluster-secret-store.gcp.example.yaml new file mode 100644 index 0000000..7bb563f --- /dev/null +++ b/environments/tls-argo/cluster-secret-store.gcp.example.yaml @@ -0,0 +1,31 @@ +# ============================================================================= +# External Secrets Operator + Google Secret Manager +# ClusterSecretStore Example +# ============================================================================= +# Apply this once per cluster after External Secrets Operator is installed. +# +# Prerequisites: +# - The external-secrets controller service account is annotated for Workload +# Identity with a GCP service account that can read Secret Manager secrets. +# - The GCP service account has at least: +# roles/secretmanager.secretAccessor +# +# This file is a reference only. Adapt project IDs and names to your cluster. +# ============================================================================= + +apiVersion: external-secrets.io/v1 +kind: ClusterSecretStore +metadata: + name: gcp-secrets +spec: + provider: + gcpsm: + projectID: countly-dev-313620 + auth: + workloadIdentity: + clusterLocation: us-central1 + clusterName: change-me + clusterProjectID: countly-dev-313620 + serviceAccountRef: + name: external-secrets + namespace: external-secrets diff --git a/environments/tls-argo/countly-tls.env b/environments/tls-argo/countly-tls.env new file mode 100644 index 0000000..568b026 --- /dev/null +++ b/environments/tls-argo/countly-tls.env @@ -0,0 +1,9 @@ +# Countly TLS Certificate Configuration - Template +# Use this only for manual bring-your-own TLS secret workflows. +# If you use Secret Manager + External Secrets, prefer ingress.tls.externalSecret +# in countly.yaml instead of managing this file. 
+ +# Base64 encoded TLS certificate (full chain) +TLS_CRT= +# Base64 encoded TLS private key +TLS_KEY= diff --git a/environments/tls-argo/countly.yaml b/environments/tls-argo/countly.yaml new file mode 100644 index 0000000..7ea0f35 --- /dev/null +++ b/environments/tls-argo/countly.yaml @@ -0,0 +1,16 @@ +# Customer-specific Countly overrides only. +# TLS Secret Manager support is prewired below and becomes active only when: +# - argocd/customers/.yaml sets tls: provided +# - the shared Secret Manager keys exist +ingress: + tls: + secretName: countly-tls + externalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + tlsCrt: tls-argo-tls-crt + tlsKey: tls-argo-tls-key diff --git a/environments/tls-argo/credentials-clickhouse.yaml b/environments/tls-argo/credentials-clickhouse.yaml new file mode 100644 index 0000000..665b934 --- /dev/null +++ b/environments/tls-argo/credentials-clickhouse.yaml @@ -0,0 +1,10 @@ +# ClickHouse secrets sourced from Google Secret Manager through External Secrets. +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + defaultUserPassword: "tls-argo-clickhouse-default-user-password" diff --git a/environments/tls-argo/credentials-countly.yaml b/environments/tls-argo/credentials-countly.yaml new file mode 100644 index 0000000..848a4b4 --- /dev/null +++ b/environments/tls-argo/credentials-countly.yaml @@ -0,0 +1,22 @@ +# Countly secrets sourced from Google Secret Manager through External Secrets. 
+secrets: + mode: externalSecret + clickhouse: + username: "default" + database: "countly_drill" + kafka: + securityProtocol: "PLAINTEXT" + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + common: + encryptionReportsKey: "tls-argo-countly-encryption-reports-key" + webSessionSecret: "tls-argo-countly-web-session-secret" + passwordSecret: "tls-argo-countly-password-secret" + clickhouse: + password: "tls-argo-countly-clickhouse-password" + mongodb: + password: "tls-argo-mongodb-app-password" diff --git a/environments/tls-argo/credentials-kafka.yaml b/environments/tls-argo/credentials-kafka.yaml new file mode 100644 index 0000000..9c29c46 --- /dev/null +++ b/environments/tls-argo/credentials-kafka.yaml @@ -0,0 +1,11 @@ +# Kafka secrets sourced from Google Secret Manager through External Secrets. +secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + clickhouse: + password: "tls-argo-kafka-connect-clickhouse-password" diff --git a/environments/tls-argo/credentials-migration.yaml b/environments/tls-argo/credentials-migration.yaml new file mode 100644 index 0000000..6fe5890 --- /dev/null +++ b/environments/tls-argo/credentials-migration.yaml @@ -0,0 +1,2 @@ +# Migration secrets placeholder. +# Fill when `migration: enabled` is used for a customer. diff --git a/environments/tls-argo/credentials-mongodb.yaml b/environments/tls-argo/credentials-mongodb.yaml new file mode 100644 index 0000000..2c82d86 --- /dev/null +++ b/environments/tls-argo/credentials-mongodb.yaml @@ -0,0 +1,21 @@ +# MongoDB secrets sourced from Google Secret Manager through External Secrets. 
+secrets: + mode: externalSecret + externalSecret: + refreshInterval: "1h" + secretStoreRef: + name: gcp-secrets + kind: ClusterSecretStore + remoteRefs: + admin: + password: "tls-argo-mongodb-admin-password" + app: + password: "tls-argo-mongodb-app-password" + metrics: + password: "tls-argo-mongodb-metrics-password" + +users: + admin: + enabled: true + metrics: + enabled: true diff --git a/environments/tls-argo/credentials-observability.yaml b/environments/tls-argo/credentials-observability.yaml new file mode 100644 index 0000000..ad07ad2 --- /dev/null +++ b/environments/tls-argo/credentials-observability.yaml @@ -0,0 +1,2 @@ +# Observability secrets — typically none required for bundled mode +# Add external backend credentials here if using observability: external diff --git a/environments/tls-argo/external-secrets.example.yaml b/environments/tls-argo/external-secrets.example.yaml new file mode 100644 index 0000000..092f526 --- /dev/null +++ b/environments/tls-argo/external-secrets.example.yaml @@ -0,0 +1,100 @@ +# ============================================================================= +# External Secrets Operator (ESO) Configuration Example +# ============================================================================= +# When using secrets.mode=externalSecret, configure the ESO remoteRefs +# in the chart-specific secrets files under environments//: +# +# environments//credentials-countly.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# common: +# encryptionReportsKey: "acme-countly-encryption-reports-key" +# webSessionSecret: "acme-countly-web-session-secret" +# passwordSecret: "acme-countly-password-secret" +# clickhouse: +# password: "acme-countly-clickhouse-password" +# mongodb: +# password: "acme-mongodb-app-password" +# +# environments//credentials-clickhouse.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# 
refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# defaultUserPassword: "acme-clickhouse-default-user-password" +# +# environments//credentials-kafka.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# clickhouse: +# password: "acme-kafka-connect-clickhouse-password" +# +# environments//credentials-mongodb.yaml +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# admin: +# password: "acme-mongodb-admin-password" +# app: +# password: "acme-mongodb-app-password" +# metrics: +# password: "acme-mongodb-metrics-password" +# +# For GAR image pulls, configure this in environments//global.yaml: +# +# global: +# imagePullSecrets: +# - name: countly-registry +# imagePullSecretExternalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRef: +# key: "acme-gar-dockerconfig" +# +# For bring-your-own Countly TLS from Secret Manager, configure this in +# environments//countly.yaml and set global.tls=provided: +# +# ingress: +# tls: +# secretName: "countly-tls" +# externalSecret: +# enabled: true +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore +# remoteRefs: +# tlsCrt: "countly-prod-tls-crt" +# tlsKey: "countly-prod-tls-key" +# +# Prerequisites: +# 1. Install External Secrets Operator: https://external-secrets.io/ +# 2. Create a SecretStore or ClusterSecretStore pointing to your secrets backend +# 3. Ensure the ESO service account has access to the referenced secrets +# +# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, +# Azure Key Vault, and many more. 
diff --git a/environments/tls-argo/global.yaml b/environments/tls-argo/global.yaml new file mode 100644 index 0000000..175d5dd --- /dev/null +++ b/environments/tls-argo/global.yaml @@ -0,0 +1,39 @@ +# ============================================================================= +# Countly Deployment — Global Configuration +# ============================================================================= + +global: + sizing: tier1 + observability: disabled + kafkaConnect: balanced + tls: provided + security: open + + imageRegistry: "" + imageSource: + mode: gcpArtifactRegistry + gcpArtifactRegistry: + repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" + imagePullSecretExternalSecret: + enabled: true + refreshInterval: "1h" + secretStoreRef: + name: "gcp-secrets" + kind: ClusterSecretStore + remoteRef: + key: "customers-gcr-argo-gar-dockerconfig" + storageClass: "" + imagePullSecrets: + - name: countly-registry + +ingress: + hostname: tls-argo.count.ly + className: nginx + +backingServices: + mongodb: + mode: bundled + clickhouse: + mode: bundled + kafka: + mode: bundled diff --git a/environments/tls-argo/image-pull-secrets.example.yaml b/environments/tls-argo/image-pull-secrets.example.yaml new file mode 100644 index 0000000..b1af340 --- /dev/null +++ b/environments/tls-argo/image-pull-secrets.example.yaml @@ -0,0 +1,41 @@ +# ============================================================================= +# Image Pull Secrets Example +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. +# +# Use this when Countly and Kafka Connect pull from a private registry +# such as GCP Artifact Registry (GAR). 
+# +# Replace: +# - metadata.name with your actual secret name if not using "countly-registry" +# - namespaces if your releases run elsewhere +# - .dockerconfigjson with the base64-encoded contents of your Docker config +# +# You need one secret per namespace because imagePullSecrets are namespaced. +# For the default layout in this repo, create the same secret in: +# - countly +# - kafka +# +# Example source for the Docker config: +# cat ~/.docker/config.json | base64 | tr -d '\n' +# +# Kubernetes secret type must be: kubernetes.io/dockerconfigjson +# ============================================================================= + +apiVersion: v1 +kind: Secret +metadata: + name: countly-registry + namespace: countly +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON +--- +apiVersion: v1 +kind: Secret +metadata: + name: countly-registry + namespace: kafka +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/tls-argo/kafka.yaml b/environments/tls-argo/kafka.yaml new file mode 100644 index 0000000..ff6fe5e --- /dev/null +++ b/environments/tls-argo/kafka.yaml @@ -0,0 +1,2 @@ +# Customer-specific Kafka overrides only. +# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/tls-argo/migration.yaml b/environments/tls-argo/migration.yaml new file mode 100644 index 0000000..fddc542 --- /dev/null +++ b/environments/tls-argo/migration.yaml @@ -0,0 +1 @@ +# Customer-specific migration overrides only. diff --git a/environments/tls-argo/mongodb.yaml b/environments/tls-argo/mongodb.yaml new file mode 100644 index 0000000..ebe28cc --- /dev/null +++ b/environments/tls-argo/mongodb.yaml @@ -0,0 +1,2 @@ +# Customer-specific MongoDB overrides only. +# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/tls-argo/observability.yaml b/environments/tls-argo/observability.yaml new file mode 100644 index 0000000..95d895f --- /dev/null +++ b/environments/tls-argo/observability.yaml @@ -0,0 +1 @@ +# Customer-specific observability overrides only. diff --git a/environments/tls-argo/secrets.example.yaml b/environments/tls-argo/secrets.example.yaml new file mode 100644 index 0000000..181cb54 --- /dev/null +++ b/environments/tls-argo/secrets.example.yaml @@ -0,0 +1,62 @@ +# ============================================================================= +# Countly Deployment — Complete Secrets Reference +# ============================================================================= +# DO NOT COMMIT THIS FILE WITH REAL VALUES. +# +# This file documents ALL secrets required for a first-time install. +# Copy to your environment directory and fill in real values, then +# split into per-chart secret files (see below). +# +# For production deployments, use one of: +# - secrets.mode: existingSecret (pre-created Kubernetes secrets) +# - secrets.mode: externalSecret (External Secrets Operator) +# - SOPS encryption (see secrets.sops.example.yaml) +# ============================================================================= + +# --- countly chart (environments//credentials-countly.yaml) --- +secrets: + mode: values + common: + encryptionReportsKey: "CHANGEME-min-8-chars" + webSessionSecret: "CHANGEME-min-8-chars" + passwordSecret: "CHANGEME-min-8-chars" + clickhouse: + password: "CHANGEME-match-clickhouse-chart" + mongodb: + password: "CHANGEME-match-mongodb-chart" + +# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- +secrets: + mode: values +users: + admin: + enabled: true + password: "CHANGEME-super-admin" + app: + password: "CHANGEME-match-secrets.mongodb.password" + metrics: + password: "CHANGEME-metrics-exporter" + +# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- +secrets: + mode: values +auth: + 
defaultUserPassword: + password: "CHANGEME-match-secrets.clickhouse.password" + +# --- countly-kafka chart (environments//credentials-kafka.yaml) --- +secrets: + mode: values +kafkaConnect: + clickhouse: + password: "CHANGEME-match-clickhouse-password" + +# For External Secrets Operator, switch the per-chart file to: +# +# secrets: +# mode: externalSecret +# externalSecret: +# refreshInterval: "1h" +# secretStoreRef: +# name: gcp-secrets +# kind: ClusterSecretStore diff --git a/environments/tls-argo/secrets.sops.example.yaml b/environments/tls-argo/secrets.sops.example.yaml new file mode 100644 index 0000000..7335b8d --- /dev/null +++ b/environments/tls-argo/secrets.sops.example.yaml @@ -0,0 +1,21 @@ +# ============================================================================= +# SOPS Encrypted Secrets Example +# ============================================================================= +# Encrypt this file with SOPS before committing: +# sops --encrypt --in-place environments//credentials-countly.yaml +# +# Configure helmfile to decrypt with the helm-secrets plugin: +# values: +# - secrets://environments//credentials-countly.yaml +# +# See: https://github.com/jkroepke/helm-secrets +# ============================================================================= + +# This file would contain the same structure as secrets.example.yaml +# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
+# +# Example .sops.yaml configuration: +# creation_rules: +# - path_regex: .*secrets.*\.yaml$ +# age: >- +# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 9836077ce464ae87d034bcbd785e9f7f5f4a63e3 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 18:29:05 +0530 Subject: [PATCH 77/79] Point Argo to main and remove tls-argo --- argocd/applicationsets/00-mongodb.yaml | 4 +- argocd/applicationsets/01-clickhouse.yaml | 4 +- argocd/applicationsets/02-kafka.yaml | 4 +- argocd/applicationsets/03-countly.yaml | 4 +- argocd/applicationsets/04-observability.yaml | 4 +- argocd/applicationsets/05-migration.yaml | 4 +- argocd/customers/tls-argo.yaml | 16 --- argocd/operators/00-cert-manager.yaml | 2 +- argocd/operators/01-mongodb-crds.yaml | 2 +- argocd/operators/02-mongodb-operator.yaml | 2 +- argocd/operators/03-clickhouse-operator.yaml | 2 +- argocd/operators/04-strimzi-operator.yaml | 2 +- argocd/operators/05-nginx-ingress.yaml | 4 +- .../06-letsencrypt-prod-issuer-app.yaml | 4 +- .../07-external-secrets-operator.yaml | 2 +- argocd/operators/08-cluster-secret-store.yaml | 4 +- argocd/root-application.yaml | 2 +- environments/tls-argo/README.md | 80 -------------- environments/tls-argo/clickhouse.yaml | 2 - .../cluster-secret-store.gcp.example.yaml | 31 ------ environments/tls-argo/countly-tls.env | 9 -- environments/tls-argo/countly.yaml | 16 --- .../tls-argo/credentials-clickhouse.yaml | 10 -- .../tls-argo/credentials-countly.yaml | 22 ---- environments/tls-argo/credentials-kafka.yaml | 11 -- .../tls-argo/credentials-migration.yaml | 2 - .../tls-argo/credentials-mongodb.yaml | 21 ---- .../tls-argo/credentials-observability.yaml | 2 - .../tls-argo/external-secrets.example.yaml | 100 ------------------ environments/tls-argo/global.yaml | 39 ------- .../tls-argo/image-pull-secrets.example.yaml | 41 ------- environments/tls-argo/kafka.yaml | 2 - environments/tls-argo/migration.yaml | 1 - environments/tls-argo/mongodb.yaml | 2 - 
environments/tls-argo/observability.yaml | 1 - environments/tls-argo/secrets.example.yaml | 62 ----------- .../tls-argo/secrets.sops.example.yaml | 21 ---- 37 files changed, 25 insertions(+), 516 deletions(-) delete mode 100644 argocd/customers/tls-argo.yaml delete mode 100644 environments/tls-argo/README.md delete mode 100644 environments/tls-argo/clickhouse.yaml delete mode 100644 environments/tls-argo/cluster-secret-store.gcp.example.yaml delete mode 100644 environments/tls-argo/countly-tls.env delete mode 100644 environments/tls-argo/countly.yaml delete mode 100644 environments/tls-argo/credentials-clickhouse.yaml delete mode 100644 environments/tls-argo/credentials-countly.yaml delete mode 100644 environments/tls-argo/credentials-kafka.yaml delete mode 100644 environments/tls-argo/credentials-migration.yaml delete mode 100644 environments/tls-argo/credentials-mongodb.yaml delete mode 100644 environments/tls-argo/credentials-observability.yaml delete mode 100644 environments/tls-argo/external-secrets.example.yaml delete mode 100644 environments/tls-argo/global.yaml delete mode 100644 environments/tls-argo/image-pull-secrets.example.yaml delete mode 100644 environments/tls-argo/kafka.yaml delete mode 100644 environments/tls-argo/migration.yaml delete mode 100644 environments/tls-argo/mongodb.yaml delete mode 100644 environments/tls-argo/observability.yaml delete mode 100644 environments/tls-argo/secrets.example.yaml delete mode 100644 environments/tls-argo/secrets.sops.example.yaml diff --git a/argocd/applicationsets/00-mongodb.yaml b/argocd/applicationsets/00-mongodb.yaml index b9de322..4cbd4e3 100644 --- a/argocd/applicationsets/00-mongodb.yaml +++ b/argocd/applicationsets/00-mongodb.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: 
https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: charts/countly-mongodb helm: releaseName: countly-mongodb diff --git a/argocd/applicationsets/01-clickhouse.yaml b/argocd/applicationsets/01-clickhouse.yaml index b745de1..817fe76 100644 --- a/argocd/applicationsets/01-clickhouse.yaml +++ b/argocd/applicationsets/01-clickhouse.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: charts/countly-clickhouse helm: releaseName: countly-clickhouse diff --git a/argocd/applicationsets/02-kafka.yaml b/argocd/applicationsets/02-kafka.yaml index 2e22f8c..db700d9 100644 --- a/argocd/applicationsets/02-kafka.yaml +++ b/argocd/applicationsets/02-kafka.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: charts/countly-kafka helm: releaseName: countly-kafka diff --git a/argocd/applicationsets/03-countly.yaml b/argocd/applicationsets/03-countly.yaml index f01023d..3e80361 100644 --- a/argocd/applicationsets/03-countly.yaml +++ b/argocd/applicationsets/03-countly.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: 
gcp-artifact-rep-integration + targetRevision: main path: charts/countly helm: releaseName: countly diff --git a/argocd/applicationsets/04-observability.yaml b/argocd/applicationsets/04-observability.yaml index 5303ca7..1d69582 100644 --- a/argocd/applicationsets/04-observability.yaml +++ b/argocd/applicationsets/04-observability.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: '{{ if eq .observability "disabled" }}charts/noop{{ else }}charts/countly-observability{{ end }}' helm: releaseName: countly-observability diff --git a/argocd/applicationsets/05-migration.yaml b/argocd/applicationsets/05-migration.yaml index 455308d..5bf7143 100644 --- a/argocd/applicationsets/05-migration.yaml +++ b/argocd/applicationsets/05-migration.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: "{{ .project }}" source: repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: '{{ if eq .migration "enabled" }}charts/countly-migration{{ else }}charts/noop{{ end }}' helm: releaseName: countly-migration diff --git a/argocd/customers/tls-argo.yaml b/argocd/customers/tls-argo.yaml deleted file mode 100644 index 8ff86b5..0000000 --- a/argocd/customers/tls-argo.yaml +++ /dev/null @@ -1,16 +0,0 @@ -customer: tls-argo -environment: tls-argo -project: countly-customers -server: https://34.70.85.2 -gcpServiceAccountEmail: gcr-argo@countly-01.iam.gserviceaccount.com -secretManagerProjectID: countly-tools -clusterProjectID: 
countly-dev-313620 -clusterName: tls-argo -clusterLocation: us-central1-a -hostname: tls-argo.count.ly -sizing: tier1 -security: open -tls: provided -observability: disabled -kafkaConnect: balanced -migration: disabled diff --git a/argocd/operators/00-cert-manager.yaml b/argocd/operators/00-cert-manager.yaml index 173a215..4bd96cd 100644 --- a/argocd/operators/00-cert-manager.yaml +++ b/argocd/operators/00-cert-manager.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/01-mongodb-crds.yaml b/argocd/operators/01-mongodb-crds.yaml index 71ae1de..ab09f9b 100644 --- a/argocd/operators/01-mongodb-crds.yaml +++ b/argocd/operators/01-mongodb-crds.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/02-mongodb-operator.yaml b/argocd/operators/02-mongodb-operator.yaml index 4e3ae6b..59a68df 100644 --- a/argocd/operators/02-mongodb-operator.yaml +++ b/argocd/operators/02-mongodb-operator.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/03-clickhouse-operator.yaml b/argocd/operators/03-clickhouse-operator.yaml index 6a289c0..569db87 100644 --- a/argocd/operators/03-clickhouse-operator.yaml +++ b/argocd/operators/03-clickhouse-operator.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/04-strimzi-operator.yaml b/argocd/operators/04-strimzi-operator.yaml index 
a49f917..3d83405 100644 --- a/argocd/operators/04-strimzi-operator.yaml +++ b/argocd/operators/04-strimzi-operator.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/05-nginx-ingress.yaml b/argocd/operators/05-nginx-ingress.yaml index 3d7b348..2883136 100644 --- a/argocd/operators/05-nginx-ingress.yaml +++ b/argocd/operators/05-nginx-ingress.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -29,7 +29,7 @@ spec: valueFiles: - $values/nginx-ingress-values.yaml - repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main ref: values destination: server: "{{ .server }}" diff --git a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml index 161d16b..6046cf4 100644 --- a/argocd/operators/06-letsencrypt-prod-issuer-app.yaml +++ b/argocd/operators/06-letsencrypt-prod-issuer-app.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: default source: repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: '{{ if eq .tls "letsencrypt" }}argocd/operator-manifests/letsencrypt-prod-issuer{{ else }}charts/noop{{ end }}' directory: recurse: true diff --git a/argocd/operators/07-external-secrets-operator.yaml b/argocd/operators/07-external-secrets-operator.yaml index de20652..f20e732 100644 --- a/argocd/operators/07-external-secrets-operator.yaml +++ b/argocd/operators/07-external-secrets-operator.yaml @@ 
-10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: diff --git a/argocd/operators/08-cluster-secret-store.yaml b/argocd/operators/08-cluster-secret-store.yaml index cdfeff4..7a39007 100644 --- a/argocd/operators/08-cluster-secret-store.yaml +++ b/argocd/operators/08-cluster-secret-store.yaml @@ -10,7 +10,7 @@ spec: generators: - git: repoURL: https://github.com/Countly/helm.git - revision: gcp-artifact-rep-integration + revision: main files: - path: argocd/customers/*.yaml template: @@ -22,7 +22,7 @@ spec: project: default source: repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: charts/countly-cluster-secret-store helm: releaseName: countly-cluster-secret-store diff --git a/argocd/root-application.yaml b/argocd/root-application.yaml index 2740352..a84dae0 100644 --- a/argocd/root-application.yaml +++ b/argocd/root-application.yaml @@ -7,7 +7,7 @@ spec: project: default source: repoURL: https://github.com/Countly/helm.git - targetRevision: gcp-artifact-rep-integration + targetRevision: main path: argocd directory: recurse: true diff --git a/environments/tls-argo/README.md b/environments/tls-argo/README.md deleted file mode 100644 index c83d349..0000000 --- a/environments/tls-argo/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Reference Environment - -This directory is a complete starting point for a new Countly deployment. - -## Quick Start - -1. Copy this directory: - ```bash - cp -r environments/reference environments/my-deployment - ``` - -2. 
Edit `global.yaml`: - - Set `ingress.hostname` to your domain - - Choose `global.sizing`: `local`, `small`, or `production` - - Choose `global.tls`: `none`, `letsencrypt`, `provided`, or `selfSigned` - - Choose `global.observability`: `disabled`, `full`, `external-grafana`, or `external` - - Choose `global.kafkaConnect`: `throughput`, `balanced`, or `low-latency` - - Choose `global.security`: `open` or `hardened` - - Choose backing service modes (bundled or external) - - For GAR, set `global.imageSource`, `global.imagePullSecrets`, and optionally `global.imagePullSecretExternalSecret` - - For `global.tls: provided`, either point Countly at a pre-created TLS secret or enable `ingress.tls.externalSecret` in `countly.yaml` - -3. Fill in required secrets in the chart-specific files: - - `credentials-countly.yaml` → `secrets.common.*` and `secrets.clickhouse.password`, `secrets.mongodb.password` - - `credentials-mongodb.yaml` → `users.app.password`, `users.metrics.password` - - `credentials-clickhouse.yaml` → `auth.defaultUserPassword.password` - - `credentials-kafka.yaml` → `kafkaConnect.clickhouse.password` - - `image-pull-secrets.example.yaml` → private registry pull secret manifests for `countly` and `kafka` - - Or use `secrets.example.yaml` as a complete reference. - -4. Register your environment in `helmfile.yaml.gotmpl`: - ```yaml - environments: - my-deployment: - values: - - environments/my-deployment/global.yaml - ``` - -5. Deploy: - ```bash - helmfile -e my-deployment apply - ``` - -## Secret Management - -See `secrets.example.yaml` for a complete list of all required secrets. - -For production, choose one of: -- **Direct values**: Fill credentials in chart-specific YAML files (split into `credentials-countly.yaml`, `credentials-mongodb.yaml`, etc.) 
-- **existingSecret**: Pre-create Kubernetes secrets and reference them -- **externalSecret**: Use External Secrets Operator and Secret Manager-backed remote refs in the same `credentials-*.yaml` files -- **SOPS**: Encrypt secret files with SOPS (see `secrets.sops.example.yaml`) - -For private registries such as GAR, also create namespaced image pull secrets. -Use `image-pull-secrets.example.yaml` as a starting point, then encrypt it with SOPS or manage it through your GitOps secret workflow. -If you use External Secrets Operator with Google Secret Manager, point `global.imagePullSecretExternalSecret.remoteRef.key` at a secret whose value is the Docker config JSON content for `us-docker.pkg.dev`. -You can use the same External Secrets pattern for Countly ingress TLS when `global.tls` is `provided`; see `countly.yaml` and `external-secrets.example.yaml`. - -## Files - -| File | Purpose | -|------|---------| -| `global.yaml` | Profile selectors, ingress, backing service modes | -| `countly.yaml` | All Countly chart values (components, config, ingress, network policy) | -| `mongodb.yaml` | MongoDB chart values (replica set, resources, exporter) | -| `clickhouse.yaml` | ClickHouse chart values (topology, auth, keeper) | -| `kafka.yaml` | Kafka chart values (brokers, controllers, connect, connectors) | -| `observability.yaml` | Observability chart values (signals, backends, Grafana, Alloy) | -| `credentials-countly.yaml` | Countly secrets (encryption keys, DB passwords) | -| `credentials-mongodb.yaml` | MongoDB user passwords | -| `credentials-clickhouse.yaml` | ClickHouse auth password | -| `credentials-kafka.yaml` | Kafka Connect ClickHouse password | -| `credentials-observability.yaml` | Observability secrets (external backend creds if needed) | -| `countly-tls.env` | Manual TLS secret helper for bring-your-own certificate workflows | -| `secrets.example.yaml` | Combined secrets reference (all charts in one file) | -| `secrets.sops.example.yaml` | SOPS encryption 
guide | -| `external-secrets.example.yaml` | External Secrets Operator guide | -| `image-pull-secrets.example.yaml` | Example GAR/private registry image pull secrets for `countly` and `kafka` | -| `cluster-secret-store.gcp.example.yaml` | Example `ClusterSecretStore` for Google Secret Manager with Workload Identity | diff --git a/environments/tls-argo/clickhouse.yaml b/environments/tls-argo/clickhouse.yaml deleted file mode 100644 index 17291a9..0000000 --- a/environments/tls-argo/clickhouse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific ClickHouse overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. diff --git a/environments/tls-argo/cluster-secret-store.gcp.example.yaml b/environments/tls-argo/cluster-secret-store.gcp.example.yaml deleted file mode 100644 index 7bb563f..0000000 --- a/environments/tls-argo/cluster-secret-store.gcp.example.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# ============================================================================= -# External Secrets Operator + Google Secret Manager -# ClusterSecretStore Example -# ============================================================================= -# Apply this once per cluster after External Secrets Operator is installed. -# -# Prerequisites: -# - The external-secrets controller service account is annotated for Workload -# Identity with a GCP service account that can read Secret Manager secrets. -# - The GCP service account has at least: -# roles/secretmanager.secretAccessor -# -# This file is a reference only. Adapt project IDs and names to your cluster. 
-# ============================================================================= - -apiVersion: external-secrets.io/v1 -kind: ClusterSecretStore -metadata: - name: gcp-secrets -spec: - provider: - gcpsm: - projectID: countly-dev-313620 - auth: - workloadIdentity: - clusterLocation: us-central1 - clusterName: change-me - clusterProjectID: countly-dev-313620 - serviceAccountRef: - name: external-secrets - namespace: external-secrets diff --git a/environments/tls-argo/countly-tls.env b/environments/tls-argo/countly-tls.env deleted file mode 100644 index 568b026..0000000 --- a/environments/tls-argo/countly-tls.env +++ /dev/null @@ -1,9 +0,0 @@ -# Countly TLS Certificate Configuration - Template -# Use this only for manual bring-your-own TLS secret workflows. -# If you use Secret Manager + External Secrets, prefer ingress.tls.externalSecret -# in countly.yaml instead of managing this file. - -# Base64 encoded TLS certificate (full chain) -TLS_CRT= -# Base64 encoded TLS private key -TLS_KEY= diff --git a/environments/tls-argo/countly.yaml b/environments/tls-argo/countly.yaml deleted file mode 100644 index 7ea0f35..0000000 --- a/environments/tls-argo/countly.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Customer-specific Countly overrides only. -# TLS Secret Manager support is prewired below and becomes active only when: -# - argocd/customers/.yaml sets tls: provided -# - the shared Secret Manager keys exist -ingress: - tls: - secretName: countly-tls - externalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - tlsCrt: tls-argo-tls-crt - tlsKey: tls-argo-tls-key diff --git a/environments/tls-argo/credentials-clickhouse.yaml b/environments/tls-argo/credentials-clickhouse.yaml deleted file mode 100644 index 665b934..0000000 --- a/environments/tls-argo/credentials-clickhouse.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# ClickHouse secrets sourced from Google Secret Manager through External Secrets. 
-secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - defaultUserPassword: "tls-argo-clickhouse-default-user-password" diff --git a/environments/tls-argo/credentials-countly.yaml b/environments/tls-argo/credentials-countly.yaml deleted file mode 100644 index 848a4b4..0000000 --- a/environments/tls-argo/credentials-countly.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Countly secrets sourced from Google Secret Manager through External Secrets. -secrets: - mode: externalSecret - clickhouse: - username: "default" - database: "countly_drill" - kafka: - securityProtocol: "PLAINTEXT" - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - common: - encryptionReportsKey: "tls-argo-countly-encryption-reports-key" - webSessionSecret: "tls-argo-countly-web-session-secret" - passwordSecret: "tls-argo-countly-password-secret" - clickhouse: - password: "tls-argo-countly-clickhouse-password" - mongodb: - password: "tls-argo-mongodb-app-password" diff --git a/environments/tls-argo/credentials-kafka.yaml b/environments/tls-argo/credentials-kafka.yaml deleted file mode 100644 index 9c29c46..0000000 --- a/environments/tls-argo/credentials-kafka.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# Kafka secrets sourced from Google Secret Manager through External Secrets. -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - clickhouse: - password: "tls-argo-kafka-connect-clickhouse-password" diff --git a/environments/tls-argo/credentials-migration.yaml b/environments/tls-argo/credentials-migration.yaml deleted file mode 100644 index 6fe5890..0000000 --- a/environments/tls-argo/credentials-migration.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Migration secrets placeholder. -# Fill when `migration: enabled` is used for a customer. 
diff --git a/environments/tls-argo/credentials-mongodb.yaml b/environments/tls-argo/credentials-mongodb.yaml deleted file mode 100644 index 2c82d86..0000000 --- a/environments/tls-argo/credentials-mongodb.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# MongoDB secrets sourced from Google Secret Manager through External Secrets. -secrets: - mode: externalSecret - externalSecret: - refreshInterval: "1h" - secretStoreRef: - name: gcp-secrets - kind: ClusterSecretStore - remoteRefs: - admin: - password: "tls-argo-mongodb-admin-password" - app: - password: "tls-argo-mongodb-app-password" - metrics: - password: "tls-argo-mongodb-metrics-password" - -users: - admin: - enabled: true - metrics: - enabled: true diff --git a/environments/tls-argo/credentials-observability.yaml b/environments/tls-argo/credentials-observability.yaml deleted file mode 100644 index ad07ad2..0000000 --- a/environments/tls-argo/credentials-observability.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Observability secrets — typically none required for bundled mode -# Add external backend credentials here if using observability: external diff --git a/environments/tls-argo/external-secrets.example.yaml b/environments/tls-argo/external-secrets.example.yaml deleted file mode 100644 index 092f526..0000000 --- a/environments/tls-argo/external-secrets.example.yaml +++ /dev/null @@ -1,100 +0,0 @@ -# ============================================================================= -# External Secrets Operator (ESO) Configuration Example -# ============================================================================= -# When using secrets.mode=externalSecret, configure the ESO remoteRefs -# in the chart-specific secrets files under environments//: -# -# environments//credentials-countly.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# common: -# encryptionReportsKey: "acme-countly-encryption-reports-key" -# 
webSessionSecret: "acme-countly-web-session-secret" -# passwordSecret: "acme-countly-password-secret" -# clickhouse: -# password: "acme-countly-clickhouse-password" -# mongodb: -# password: "acme-mongodb-app-password" -# -# environments//credentials-clickhouse.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# defaultUserPassword: "acme-clickhouse-default-user-password" -# -# environments//credentials-kafka.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# clickhouse: -# password: "acme-kafka-connect-clickhouse-password" -# -# environments//credentials-mongodb.yaml -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# admin: -# password: "acme-mongodb-admin-password" -# app: -# password: "acme-mongodb-app-password" -# metrics: -# password: "acme-mongodb-metrics-password" -# -# For GAR image pulls, configure this in environments//global.yaml: -# -# global: -# imagePullSecrets: -# - name: countly-registry -# imagePullSecretExternalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRef: -# key: "acme-gar-dockerconfig" -# -# For bring-your-own Countly TLS from Secret Manager, configure this in -# environments//countly.yaml and set global.tls=provided: -# -# ingress: -# tls: -# secretName: "countly-tls" -# externalSecret: -# enabled: true -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore -# remoteRefs: -# tlsCrt: "countly-prod-tls-crt" -# tlsKey: "countly-prod-tls-key" -# -# Prerequisites: -# 1. Install External Secrets Operator: https://external-secrets.io/ -# 2. 
Create a SecretStore or ClusterSecretStore pointing to your secrets backend -# 3. Ensure the ESO service account has access to the referenced secrets -# -# Supported backends: AWS Secrets Manager, HashiCorp Vault, GCP Secret Manager, -# Azure Key Vault, and many more. diff --git a/environments/tls-argo/global.yaml b/environments/tls-argo/global.yaml deleted file mode 100644 index 175d5dd..0000000 --- a/environments/tls-argo/global.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# ============================================================================= -# Countly Deployment — Global Configuration -# ============================================================================= - -global: - sizing: tier1 - observability: disabled - kafkaConnect: balanced - tls: provided - security: open - - imageRegistry: "" - imageSource: - mode: gcpArtifactRegistry - gcpArtifactRegistry: - repositoryPrefix: "us-docker.pkg.dev/countly-01/countly-unified" - imagePullSecretExternalSecret: - enabled: true - refreshInterval: "1h" - secretStoreRef: - name: "gcp-secrets" - kind: ClusterSecretStore - remoteRef: - key: "customers-gcr-argo-gar-dockerconfig" - storageClass: "" - imagePullSecrets: - - name: countly-registry - -ingress: - hostname: tls-argo.count.ly - className: nginx - -backingServices: - mongodb: - mode: bundled - clickhouse: - mode: bundled - kafka: - mode: bundled diff --git a/environments/tls-argo/image-pull-secrets.example.yaml b/environments/tls-argo/image-pull-secrets.example.yaml deleted file mode 100644 index b1af340..0000000 --- a/environments/tls-argo/image-pull-secrets.example.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# ============================================================================= -# Image Pull Secrets Example -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES UNENCRYPTED. -# -# Use this when Countly and Kafka Connect pull from a private registry -# such as GCP Artifact Registry (GAR). 
-# -# Replace: -# - metadata.name with your actual secret name if not using "countly-registry" -# - namespaces if your releases run elsewhere -# - .dockerconfigjson with the base64-encoded contents of your Docker config -# -# You need one secret per namespace because imagePullSecrets are namespaced. -# For the default layout in this repo, create the same secret in: -# - countly -# - kafka -# -# Example source for the Docker config: -# cat ~/.docker/config.json | base64 | tr -d '\n' -# -# Kubernetes secret type must be: kubernetes.io/dockerconfigjson -# ============================================================================= - -apiVersion: v1 -kind: Secret -metadata: - name: countly-registry - namespace: countly -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON ---- -apiVersion: v1 -kind: Secret -metadata: - name: countly-registry - namespace: kafka -type: kubernetes.io/dockerconfigjson -data: - .dockerconfigjson: CHANGEME_BASE64_DOCKER_CONFIG_JSON diff --git a/environments/tls-argo/kafka.yaml b/environments/tls-argo/kafka.yaml deleted file mode 100644 index ff6fe5e..0000000 --- a/environments/tls-argo/kafka.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific Kafka overrides only. -# Leave this file minimal so sizing / kafka-connect / observability / security profiles apply cleanly. diff --git a/environments/tls-argo/migration.yaml b/environments/tls-argo/migration.yaml deleted file mode 100644 index fddc542..0000000 --- a/environments/tls-argo/migration.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific migration overrides only. diff --git a/environments/tls-argo/mongodb.yaml b/environments/tls-argo/mongodb.yaml deleted file mode 100644 index ebe28cc..0000000 --- a/environments/tls-argo/mongodb.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Customer-specific MongoDB overrides only. -# Leave this file minimal so sizing / security profiles apply cleanly. 
diff --git a/environments/tls-argo/observability.yaml b/environments/tls-argo/observability.yaml deleted file mode 100644 index 95d895f..0000000 --- a/environments/tls-argo/observability.yaml +++ /dev/null @@ -1 +0,0 @@ -# Customer-specific observability overrides only. diff --git a/environments/tls-argo/secrets.example.yaml b/environments/tls-argo/secrets.example.yaml deleted file mode 100644 index 181cb54..0000000 --- a/environments/tls-argo/secrets.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# ============================================================================= -# Countly Deployment — Complete Secrets Reference -# ============================================================================= -# DO NOT COMMIT THIS FILE WITH REAL VALUES. -# -# This file documents ALL secrets required for a first-time install. -# Copy to your environment directory and fill in real values, then -# split into per-chart secret files (see below). -# -# For production deployments, use one of: -# - secrets.mode: existingSecret (pre-created Kubernetes secrets) -# - secrets.mode: externalSecret (External Secrets Operator) -# - SOPS encryption (see secrets.sops.example.yaml) -# ============================================================================= - -# --- countly chart (environments//credentials-countly.yaml) --- -secrets: - mode: values - common: - encryptionReportsKey: "CHANGEME-min-8-chars" - webSessionSecret: "CHANGEME-min-8-chars" - passwordSecret: "CHANGEME-min-8-chars" - clickhouse: - password: "CHANGEME-match-clickhouse-chart" - mongodb: - password: "CHANGEME-match-mongodb-chart" - -# --- countly-mongodb chart (environments//credentials-mongodb.yaml) --- -secrets: - mode: values -users: - admin: - enabled: true - password: "CHANGEME-super-admin" - app: - password: "CHANGEME-match-secrets.mongodb.password" - metrics: - password: "CHANGEME-metrics-exporter" - -# --- countly-clickhouse chart (environments//credentials-clickhouse.yaml) --- -secrets: - mode: values 
-auth: - defaultUserPassword: - password: "CHANGEME-match-secrets.clickhouse.password" - -# --- countly-kafka chart (environments//credentials-kafka.yaml) --- -secrets: - mode: values -kafkaConnect: - clickhouse: - password: "CHANGEME-match-clickhouse-password" - -# For External Secrets Operator, switch the per-chart file to: -# -# secrets: -# mode: externalSecret -# externalSecret: -# refreshInterval: "1h" -# secretStoreRef: -# name: gcp-secrets -# kind: ClusterSecretStore diff --git a/environments/tls-argo/secrets.sops.example.yaml b/environments/tls-argo/secrets.sops.example.yaml deleted file mode 100644 index 7335b8d..0000000 --- a/environments/tls-argo/secrets.sops.example.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# ============================================================================= -# SOPS Encrypted Secrets Example -# ============================================================================= -# Encrypt this file with SOPS before committing: -# sops --encrypt --in-place environments//credentials-countly.yaml -# -# Configure helmfile to decrypt with the helm-secrets plugin: -# values: -# - secrets://environments//credentials-countly.yaml -# -# See: https://github.com/jkroepke/helm-secrets -# ============================================================================= - -# This file would contain the same structure as secrets.example.yaml -# but encrypted with SOPS (AGE, AWS KMS, GCP KMS, or Azure Key Vault). 
-# -# Example .sops.yaml configuration: -# creation_rules: -# - path_regex: .*secrets.*\.yaml$ -# age: >- -# age1xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx From 0f04fdc597794da2b6dbf9a318f06f34fe196d0a Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 18:44:43 +0530 Subject: [PATCH 78/79] Clarify shared TLS secret defaults --- README.md | 2 +- argocd/ONBOARDING.md | 2 ++ charts/countly/values.yaml | 2 ++ docs/DEPLOYMENT-MODES.md | 2 ++ environments/reference/countly.yaml | 2 ++ scripts/new-argocd-customer.sh | 4 ++++ 6 files changed, 13 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 82516ee..2a6bda1 100644 --- a/README.md +++ b/README.md @@ -179,7 +179,7 @@ For a GAR-backed production example, see [environments/example-production/global For GitOps-managed pull secrets, start from [environments/reference/image-pull-secrets.example.yaml](/Users/admin/cly/helm/environments/reference/image-pull-secrets.example.yaml) and encrypt or template it before committing. For Secret Manager + External Secrets Operator, set `global.imagePullSecretExternalSecret` in your environment `global.yaml` so Countly can create its namespaced `dockerconfigjson` pull secret. Application secrets can use the same pattern in `credentials-countly.yaml`, `credentials-kafka.yaml`, `credentials-clickhouse.yaml`, and `credentials-mongodb.yaml` by switching `secrets.mode` to `externalSecret` and filling `secrets.externalSecret.remoteRefs`. -Countly ingress TLS can also use the same pattern: set customer `tls: provided`, then enable `ingress.tls.externalSecret` in `countly.yaml` to materialize a `kubernetes.io/tls` secret from Secret Manager keys such as `countly-prod-tls-crt` and `countly-prod-tls-key`. +Countly ingress TLS can also use the same pattern: set customer `tls: provided`, then enable `ingress.tls.externalSecret` in `countly.yaml` to materialize a `kubernetes.io/tls` secret from Secret Manager. 
The default scaffold already points all customers at the shared keys `countly-prod-tls-crt` and `countly-prod-tls-key`; override them only when a customer needs a dedicated certificate. Recommended Secret Manager naming convention: - `-gar-dockerconfig` diff --git a/argocd/ONBOARDING.md b/argocd/ONBOARDING.md index aae0f9c..33e0fa3 100644 --- a/argocd/ONBOARDING.md +++ b/argocd/ONBOARDING.md @@ -356,6 +356,8 @@ If you want to use your own certificate instead of Let's Encrypt: - `countly-prod-tls-crt` - `countly-prod-tls-key` +This is the default path now. New customers generated in `gcp-secrets` mode already point at these shared TLS keys, so you do not need to rename them per customer unless a customer needs its own certificate. + Example: ```yaml diff --git a/charts/countly/values.yaml b/charts/countly/values.yaml index 3664588..159efc5 100644 --- a/charts/countly/values.yaml +++ b/charts/countly/values.yaml @@ -521,6 +521,8 @@ ingress: name: "" kind: ClusterSecretStore remoteRefs: + # Shared TLS certificate defaults for all customers. + # Override per customer only when a customer needs its own certificate. tlsCrt: "countly-prod-tls-crt" tlsKey: "countly-prod-tls-key" selfSigned: diff --git a/docs/DEPLOYMENT-MODES.md b/docs/DEPLOYMENT-MODES.md index 2be4c3a..0c75b78 100644 --- a/docs/DEPLOYMENT-MODES.md +++ b/docs/DEPLOYMENT-MODES.md @@ -42,6 +42,8 @@ ingress: name: gcp-secrets kind: ClusterSecretStore remoteRefs: + # Shared TLS keys for all customers by default. + # Override only for customer-specific certificates. tlsCrt: countly-prod-tls-crt tlsKey: countly-prod-tls-key ``` diff --git a/environments/reference/countly.yaml b/environments/reference/countly.yaml index 3d9ea4d..5ae7cfe 100644 --- a/environments/reference/countly.yaml +++ b/environments/reference/countly.yaml @@ -573,6 +573,8 @@ ingress: name: "" # e.g. gcp-secrets kind: ClusterSecretStore remoteRefs: + # Shared TLS certificate defaults for all customers. 
+ # Override only if a specific customer needs its own certificate. tlsCrt: "countly-prod-tls-crt" tlsKey: "countly-prod-tls-key" selfSigned: diff --git a/scripts/new-argocd-customer.sh b/scripts/new-argocd-customer.sh index 8240e00..2061c04 100755 --- a/scripts/new-argocd-customer.sh +++ b/scripts/new-argocd-customer.sh @@ -177,6 +177,10 @@ if [[ "${secret_mode}" == "gcp-secrets" ]]; then # TLS Secret Manager support is prewired below and becomes active only when: # - argocd/customers/.yaml sets tls: provided # - the shared Secret Manager keys exist +# By default this reuses one shared certificate for every customer: +# - countly-prod-tls-crt +# - countly-prod-tls-key +# Override these remoteRefs only if a specific customer needs its own cert. ingress: tls: externalSecret: From b4cea8a518f7d1ebf4db2945d20b11abfaef2460 Mon Sep 17 00:00:00 2001 From: ihaardik Date: Thu, 2 Apr 2026 19:29:33 +0530 Subject: [PATCH 79/79] Fix chart render validation inputs --- .github/workflows/validate-charts.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/validate-charts.yml b/.github/workflows/validate-charts.yml index 9a64848..0ae7d00 100644 --- a/.github/workflows/validate-charts.yml +++ b/.github/workflows/validate-charts.yml @@ -104,10 +104,19 @@ jobs: ;; countly-mongodb) helm template test-release "${chart}" \ + --set users.admin.password=test \ --set users.app.password=test \ --set users.metrics.password=test \ > /dev/null || exit_code=1 ;; + countly-cluster-secret-store) + helm template test-release "${chart}" \ + --set secretStore.secretManagerProjectID=test-project \ + --set secretStore.clusterProjectID=test-cluster-project \ + --set secretStore.clusterName=test-cluster \ + --set secretStore.clusterLocation=test-location \ + > /dev/null || exit_code=1 + ;; countly-migration) helm template test-release "${chart}" \ --set backingServices.mongodb.password=test \