From 609194dedacbc38221d1063a4b5d6254094d9fb6 Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Tue, 16 Dec 2025 20:32:05 -0500 Subject: [PATCH 01/12] fix(e2e): add missing selectGreetingWorkflowItem method to Orchestrator class --- e2e-tests/playwright/support/pages/orchestrator.ts | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/e2e-tests/playwright/support/pages/orchestrator.ts b/e2e-tests/playwright/support/pages/orchestrator.ts index 96fa385e57..7da7d0cbf6 100644 --- a/e2e-tests/playwright/support/pages/orchestrator.ts +++ b/e2e-tests/playwright/support/pages/orchestrator.ts @@ -31,6 +31,16 @@ export class Orchestrator { await this.page.getByRole("link", { name: "User Onboarding" }).click(); } + async selectGreetingWorkflowItem() { + const workflowHeader = this.page.getByRole("heading", { + name: "Workflows", + }); + await expect(workflowHeader).toBeVisible(); + await expect(workflowHeader).toHaveText("Workflows"); + await expect(Workflows.workflowsTable(this.page)).toBeVisible(); + await this.page.getByRole("link", { name: "Greeting workflow" }).click(); + } + async getPageUrl() { return this.page.url(); } From 834e40d1344f4c702edba8fadf7ada8ca7dd82bf Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 09:10:48 -0500 Subject: [PATCH 02/12] fix(e2e): update workflows table selector to match actual UI The orchestrator workflows table selector was looking for "WorkflowsNameCategoryLast" but the actual UI only displays columns: Name, Workflow Status, Last run, Last run status, Description, Actions. The "Category" column does not exist in the release-1.8 UI, causing the orchestrator RBAC tests to fail with element not found errors. This fix updates the selector to match the actual table header text "Workflows" which is present in the UI. Backported from commit f17d95b0 (PR #3406) in main branch. Fixes failing test: - Test Orchestrator RBAC > Test global orchestrator workflow access is allowed Related: FLPATH-2798 --- e2e-tests/playwright/support/pages/workflows.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e-tests/playwright/support/pages/workflows.ts b/e2e-tests/playwright/support/pages/workflows.ts index 2f929d1d83..dfe0bf9952 100644 --- a/e2e-tests/playwright/support/pages/workflows.ts +++ b/e2e-tests/playwright/support/pages/workflows.ts @@ -3,7 +3,7 @@ import { Page } from "@playwright/test"; const workflowsTable = (page: Page) => page .locator("#root div") - .filter({ hasText: "WorkflowsNameCategoryLast" }) + .filter({ hasText: "Workflows" }) .nth(2); const WORKFLOWS = { From 39c8df0d0cd1a3428fddccb63212b09cdf55a6dd Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 09:26:03 -0500 Subject: [PATCH 03/12] fix(e2e): add wait and timeout to orchestrator greeting workflow helm install Add --wait --timeout=5m flags to the greeting workflow helm install command to ensure workflow pods are ready before tests execute. Without --wait, the helm command returns immediately while pods are still initializing, which can cause: - Tests to run before workflows are available - Race conditions between workflow deployment and test execution - Pods experiencing CreateContainerConfigError during startup With --wait, helm monitors the release and only returns success when all pods are Running and pass readiness probes. The 5-minute timeout provides ample time for the pods to start (observed ready time: ~90 seconds). This ensures tests only run against fully-initialized infrastructure and provides clearer failure messages if pods cannot start. Related: FLPATH-2798 --- .ibm/pipelines/utils.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 2b2988becd..b8d743f7e4 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -1263,7 +1263,7 @@ deploy_orchestrator_workflows() { oc apply -f "${WORKFLOW_MANIFESTS}" helm repo add orchestrator-workflows https://rhdhorchestrator.io/serverless-workflows - helm install greeting orchestrator-workflows/greeting -n "$namespace" + helm install greeting orchestrator-workflows/greeting -n "$namespace" --wait --timeout=5m until [[ $(oc get sf -n "$namespace" --no-headers 2> /dev/null | wc -l) -eq 2 ]]; do echo "No sf resources found. Retrying in 5 seconds..." From ae1ea89ea8fd2e0708ef60be72674df8399365b4 Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 11:24:02 -0500 Subject: [PATCH 04/12] style: fix prettier formatting in workflows.ts --- e2e-tests/playwright/support/pages/workflows.ts | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/e2e-tests/playwright/support/pages/workflows.ts b/e2e-tests/playwright/support/pages/workflows.ts index dfe0bf9952..33e0ba12e7 100644 --- a/e2e-tests/playwright/support/pages/workflows.ts +++ b/e2e-tests/playwright/support/pages/workflows.ts @@ -1,10 +1,7 @@ import { Page } from "@playwright/test"; const workflowsTable = (page: Page) => - page - .locator("#root div") - .filter({ hasText: "Workflows" }) - .nth(2); + page.locator("#root div").filter({ hasText: "Workflows" }).nth(2); const WORKFLOWS = { workflowsTable, From 80a7c4aadb87875fce868cab5bdcac7b9ac22498 Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 13:00:49 -0500 Subject: [PATCH 05/12] fix(ci): add SSL support for external PostgreSQL in sonataflow database creation Add manual database creation workaround for showcase-rbac deployment to handle SSL-required connections to external Crunchy Data PostgreSQL clusters. The helm chart's create-sonataflow-database job does not inject PGSSLMODE environment variable, causing authentication failures when connecting to external PostgreSQL instances that require SSL (Crunchy Data operator). This fix adds: - create_sonataflow_database_with_ssl() helper function - Temporary pod that runs psql with PGSSLMODE=require - Proper SSL configuration from postgres-cred secret Without SSL configuration: FATAL: no pg_hba.conf entry for host "X.X.X.X", user "janus-idp", database "postgres", no encryption This resolves CrashLoopBackOff issues in showcase-rbac namespace for: - greeting workflow - user-onboarding workflow - sonataflow-platform-data-index-service - sonataflow-platform-jobs-service Related: FLPATH-2798 --- .ibm/pipelines/utils.sh | 95 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 2 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index b8d743f7e4..970cd73eb0 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -818,6 +818,91 @@ perform_helm_install() { $(get_image_helm_set_params) } +# Helper function to manually create sonataflow database with SSL support +# This is a workaround for the helm chart's create-db job not including PGSSLMODE env var +create_sonataflow_database_with_ssl() { + local namespace=$1 + + echo "Manually creating sonataflow database with SSL support..." + + # Create a temporary pod to run psql with SSL + cat </dev/null || echo "NotFound") + if [[ "$status" == "Succeeded" ]] || [[ "$status" == "Failed" ]]; then + break + fi + sleep 5 + elapsed=$((elapsed + 5)) + done + + # Check logs + echo "Database creation output:" + oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" + + # Clean up the pod + oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true + + echo "Manual database creation completed" +} + base_deployment() { configure_namespace ${NAME_SPACE} @@ -843,12 +928,18 @@ rbac_deployment() { echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${RELEASE_NAME_RBAC}" perform_helm_install "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC}" "${HELM_CHART_RBAC_VALUE_FILE_NAME}" - # NOTE: This is a workaround to allow the sonataflow platform to connect to the external postgres db using ssl. + # NOTE: The helm chart's create-sonataflow-database job will fail because it doesn't include PGSSLMODE env var. + # We wait for the job to be created (indicating helm install is progressing), then manually create the database with SSL. until [[ $(oc get jobs -n "${NAME_SPACE_RBAC}" 2> /dev/null | grep "${RELEASE_NAME_RBAC}-create-sonataflow-database" | wc -l) -eq 1 ]]; do echo "Waiting for sf db creation job to be created. Retrying in 5 seconds..." sleep 5 done - oc wait --for=condition=complete job/"${RELEASE_NAME_RBAC}-create-sonataflow-database" -n "${NAME_SPACE_RBAC}" --timeout=3m + + # Don't wait for the helm job to complete - it will fail due to missing SSL configuration + # Instead, manually create the database with proper SSL support + create_sonataflow_database_with_ssl "${NAME_SPACE_RBAC}" + + # Patch the sonataflow platform to configure SSL for the jobs service oc -n "${NAME_SPACE_RBAC}" patch sfp sonataflow-platform --type=merge \ -p '{"spec":{"services":{"jobService":{"podTemplate":{"container":{"env":[{"name":"QUARKUS_DATASOURCE_REACTIVE_URL","value":"postgresql://postgress-external-db-primary.postgress-external-db.svc.cluster.local:5432/sonataflow?search_path=jobs-service&sslmode=require&ssl=true&trustAll=true"},{"name":"QUARKUS_DATASOURCE_REACTIVE_SSL_MODE","value":"require"},{"name":"QUARKUS_DATASOURCE_REACTIVE_TRUST_ALL","value":"true"}]}}}}}}' oc rollout restart deployment/sonataflow-platform-jobs-service -n "${NAME_SPACE_RBAC}" From b84c050a8fb6a808dd0b0ff78467e47808b3867e Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 14:56:10 -0500 Subject: [PATCH 06/12] fix(ci): improve sonataflow database creation reliability - Increase timeout from 2 minutes to 5 minutes to handle image pull delays and rate limiting - Add database verification step to confirm successful creation - Improve status reporting during pod creation with status change logging - Add wait for jobs-service rollout before deploying workflows to prevent race conditions - Better error handling and logging throughout the process This addresses issues where the manual database creation pod was timing out due to ImagePullBackOff delays (QPS exceeded) in the CI environment. --- .ibm/pipelines/utils.sh | 123 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 119 insertions(+), 4 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 970cd73eb0..60eca35398 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -879,30 +879,132 @@ EOF # Wait for the pod to start and complete echo "Waiting for manual database creation pod to complete..." - sleep 2 - # Wait up to 2 minutes for completion - local timeout=120 + # Wait up to 5 minutes for completion (accounts for image pull delays, rate limiting, etc.) + local timeout=300 local elapsed=0 + local last_status="" + while [ $elapsed -lt $timeout ]; do local status=$(oc get pod create-sonataflow-db-manual -n "${namespace}" -o jsonpath='{.status.phase}' 2>/dev/null || echo "NotFound") - if [[ "$status" == "Succeeded" ]] || [[ "$status" == "Failed" ]]; then + + # Print status changes + if [[ "$status" != "$last_status" ]]; then + echo "Pod status: $status (elapsed: ${elapsed}s)" + last_status="$status" + fi + + if [[ "$status" == "Succeeded" ]]; then + echo "Database creation pod completed successfully" + break + elif [[ "$status" == "Failed" ]]; then + echo "WARNING: Database creation pod failed" break fi + sleep 5 elapsed=$((elapsed + 5)) done + if [ $elapsed -ge $timeout ]; then + echo "WARNING: Timeout waiting for database creation pod (${timeout}s elapsed)" + fi + # Check logs echo "Database creation output:" oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" + # Check pod status for troubleshooting + echo "Final pod status:" + oc get pod create-sonataflow-db-manual -n "${namespace}" -o jsonpath='{.status.containerStatuses[0].state}' 2>/dev/null || echo "Could not get pod status" + echo "" + # Clean up the pod oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true echo "Manual database creation completed" } +# Verify that the sonataflow database exists +verify_sonataflow_database() { + local namespace=$1 + + echo "Verifying sonataflow database exists..." + + # Create a verification pod + cat </dev/null || echo "NotFound") + if [[ "$status" == "Succeeded" ]] || [[ "$status" == "Failed" ]]; then + break + fi + sleep 3 + elapsed=$((elapsed + 3)) + done + + # Check the result + local verification_output=$(oc logs verify-sonataflow-db -n "${namespace}" 2>/dev/null) + echo "$verification_output" + + # Clean up + oc delete pod verify-sonataflow-db -n "${namespace}" --ignore-not-found=true + + # Return success if database exists + if echo "$verification_output" | grep -q "DATABASE EXISTS"; then + echo "✓ Database verification successful" + return 0 + else + echo "✗ Database verification failed" + return 1 + fi +} + base_deployment() { configure_namespace ${NAME_SPACE} @@ -939,11 +1041,24 @@ rbac_deployment() { # Instead, manually create the database with proper SSL support create_sonataflow_database_with_ssl "${NAME_SPACE_RBAC}" + # Verify the database was created successfully + if ! verify_sonataflow_database "${NAME_SPACE_RBAC}"; then + echo "ERROR: Failed to verify sonataflow database creation. Workflows may fail to start." + echo "Attempting to continue anyway..." + fi + # Patch the sonataflow platform to configure SSL for the jobs service + echo "Patching SonataFlowPlatform with SSL configuration..." oc -n "${NAME_SPACE_RBAC}" patch sfp sonataflow-platform --type=merge \ -p '{"spec":{"services":{"jobService":{"podTemplate":{"container":{"env":[{"name":"QUARKUS_DATASOURCE_REACTIVE_URL","value":"postgresql://postgress-external-db-primary.postgress-external-db.svc.cluster.local:5432/sonataflow?search_path=jobs-service&sslmode=require&ssl=true&trustAll=true"},{"name":"QUARKUS_DATASOURCE_REACTIVE_SSL_MODE","value":"require"},{"name":"QUARKUS_DATASOURCE_REACTIVE_TRUST_ALL","value":"true"}]}}}}}}' + + echo "Restarting jobs-service deployment..." oc rollout restart deployment/sonataflow-platform-jobs-service -n "${NAME_SPACE_RBAC}" + # Wait for jobs-service to be ready before deploying workflows + echo "Waiting for jobs-service to be ready..." + oc rollout status deployment/sonataflow-platform-jobs-service -n "${NAME_SPACE_RBAC}" --timeout=3m || echo "WARNING: jobs-service rollout did not complete in time" + # initiate orchestrator workflows deployment deploy_orchestrator_workflows "${NAME_SPACE_RBAC}" } From 200a1c2a430cdeffc3a65f730ae45e5d2935f10a Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 15:01:51 -0500 Subject: [PATCH 07/12] style: fix ShellCheck SC2155 warnings in utils.sh Separate variable declarations from assignments to avoid masking return values. This resolves ShellCheck warnings in: - create_sonataflow_database_with_ssl() function (line 889) - verify_sonataflow_database() function (lines 983, 992) --- .ibm/pipelines/utils.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 60eca35398..1218a19537 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -884,9 +884,10 @@ EOF local timeout=300 local elapsed=0 local last_status="" + local status while [ $elapsed -lt $timeout ]; do - local status=$(oc get pod create-sonataflow-db-manual -n "${namespace}" -o jsonpath='{.status.phase}' 2>/dev/null || echo "NotFound") + status=$(oc get pod create-sonataflow-db-manual -n "${namespace}" -o jsonpath='{.status.phase}' 2>/dev/null || echo "NotFound") # Print status changes if [[ "$status" != "$last_status" ]]; then @@ -979,8 +980,9 @@ EOF # Wait for completion (shorter timeout since image should be cached) local timeout=60 local elapsed=0 + local status while [ $elapsed -lt $timeout ]; do - local status=$(oc get pod verify-sonataflow-db -n "${namespace}" -o jsonpath='{.status.phase}' 2>/dev/null || echo "NotFound") + status=$(oc get pod verify-sonataflow-db -n "${namespace}" -o jsonpath='{.status.phase}' 2>/dev/null || echo "NotFound") if [[ "$status" == "Succeeded" ]] || [[ "$status" == "Failed" ]]; then break fi @@ -989,7 +991,8 @@ EOF done # Check the result - local verification_output=$(oc logs verify-sonataflow-db -n "${namespace}" 2>/dev/null) + local verification_output + verification_output=$(oc logs verify-sonataflow-db -n "${namespace}" 2>/dev/null) echo "$verification_output" # Clean up From f73f1d1a625e4e51bd956579a142c7866a8928fa Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 15:15:38 -0500 Subject: [PATCH 08/12] fix(ci): add proper error returns for database creation failures - Return error code 1 when database creation pod fails - Return error code 1 when database creation times out - Clean up pod and show logs before returning on failure - Change WARNING to ERROR for actual failure cases --- .ibm/pipelines/utils.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 1218a19537..116dce50a2 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -899,8 +899,10 @@ EOF echo "Database creation pod completed successfully" break elif [[ "$status" == "Failed" ]]; then - echo "WARNING: Database creation pod failed" - break + echo "ERROR: Database creation pod failed" + oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" + oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true + return 1 fi sleep 5 @@ -908,22 +910,20 @@ EOF done if [ $elapsed -ge $timeout ]; then - echo "WARNING: Timeout waiting for database creation pod (${timeout}s elapsed)" + echo "ERROR: Timeout waiting for database creation pod (${timeout}s elapsed)" + oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" + oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true + return 1 fi - # Check logs + # Check logs for successful completion echo "Database creation output:" oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" - # Check pod status for troubleshooting - echo "Final pod status:" - oc get pod create-sonataflow-db-manual -n "${namespace}" -o jsonpath='{.status.containerStatuses[0].state}' 2>/dev/null || echo "Could not get pod status" - echo "" - # Clean up the pod oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true - echo "Manual database creation completed" + echo "Manual database creation completed successfully" } # Verify that the sonataflow database exists From 42071f25b753d61c76290ec43226880e4aa778f1 Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 15:27:24 -0500 Subject: [PATCH 09/12] style: fix prettier formatting in utils.sh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- .ibm/pipelines/utils.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 116dce50a2..0f18f7ae5e 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -826,7 +826,7 @@ create_sonataflow_database_with_ssl() { echo "Manually creating sonataflow database with SSL support..." # Create a temporary pod to run psql with SSL - cat </dev/null || echo "NotFound") + status=$(oc get pod create-sonataflow-db-manual -n "${namespace}" -o jsonpath='{.status.phase}' 2> /dev/null || echo "NotFound") # Print status changes if [[ "$status" != "$last_status" ]]; then @@ -900,7 +900,7 @@ EOF break elif [[ "$status" == "Failed" ]]; then echo "ERROR: Database creation pod failed" - oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" + oc logs create-sonataflow-db-manual -n "${namespace}" 2> /dev/null || echo "Could not retrieve logs" oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true return 1 fi @@ -911,14 +911,14 @@ EOF if [ $elapsed -ge $timeout ]; then echo "ERROR: Timeout waiting for database creation pod (${timeout}s elapsed)" - oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" + oc logs create-sonataflow-db-manual -n "${namespace}" 2> /dev/null || echo "Could not retrieve logs" oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true return 1 fi # Check logs for successful completion echo "Database creation output:" - oc logs create-sonataflow-db-manual -n "${namespace}" 2>/dev/null || echo "Could not retrieve logs" + oc logs create-sonataflow-db-manual -n "${namespace}" 2> /dev/null || echo "Could not retrieve logs" # Clean up the pod oc delete pod create-sonataflow-db-manual -n "${namespace}" --ignore-not-found=true @@ -933,7 +933,7 @@ verify_sonataflow_database() { echo "Verifying sonataflow database exists..." # Create a verification pod - cat </dev/null || echo "NotFound") + status=$(oc get pod verify-sonataflow-db -n "${namespace}" -o jsonpath='{.status.phase}' 2> /dev/null || echo "NotFound") if [[ "$status" == "Succeeded" ]] || [[ "$status" == "Failed" ]]; then break fi @@ -992,7 +992,7 @@ EOF # Check the result local verification_output - verification_output=$(oc logs verify-sonataflow-db -n "${namespace}" 2>/dev/null) + verification_output=$(oc logs verify-sonataflow-db -n "${namespace}" 2> /dev/null) echo "$verification_output" # Clean up From 9f7179c489c8263ed135347d4309f27b51b44422 Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 17:11:26 -0500 Subject: [PATCH 10/12] fix(ci): remove readOnlyRootFilesystem from database creation pod MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The securityContext with readOnlyRootFilesystem: true was preventing psql from working properly because it needs to write temporary files to /tmp during SSL connections to the external PostgreSQL database. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- .ibm/pipelines/utils.sh | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.ibm/pipelines/utils.sh b/.ibm/pipelines/utils.sh index 0f18f7ae5e..13f01c9e37 100755 --- a/.ibm/pipelines/utils.sh +++ b/.ibm/pipelines/utils.sh @@ -837,13 +837,8 @@ spec: containers: - name: psql image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1 - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - runAsNonRoot: true - capabilities: - drop: - - ALL + # Note: We intentionally do not set readOnlyRootFilesystem here because + # psql requires write access to /tmp for temporary files during SSL connections command: ["sh", "-c"] args: - | From a86b5748f359f513d11d98ff0371f2f0c9dca663 Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Wed, 17 Dec 2025 20:19:11 -0500 Subject: [PATCH 11/12] fix(ci): increase dynamic-plugins-root volume to 5Gi MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The default 2Gi ephemeral volume for dynamic-plugins-root is insufficient when many plugins are enabled (orchestrator, kubernetes, tekton, techdocs, keycloak, etc.). The init container fails with "No space left on device" error during plugin extraction. Increase the volume size to 5Gi for both showcase and RBAC namespaces using the deployment.patch field in the Backstage CR. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- .../resources/rhdh-operator/rhdh-start-rbac.yaml | 15 +++++++++++++++ .../resources/rhdh-operator/rhdh-start.yaml | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/.ibm/pipelines/resources/rhdh-operator/rhdh-start-rbac.yaml b/.ibm/pipelines/resources/rhdh-operator/rhdh-start-rbac.yaml index ac407ba8af..45378fb4a6 100644 --- a/.ibm/pipelines/resources/rhdh-operator/rhdh-start-rbac.yaml +++ b/.ibm/pipelines/resources/rhdh-operator/rhdh-start-rbac.yaml @@ -24,3 +24,18 @@ spec: configMaps: - name: rbac-policy mountPath: /opt/app-root/src/rbac + deployment: + patch: + spec: + template: + spec: + volumes: + - name: dynamic-plugins-root + ephemeral: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi diff --git a/.ibm/pipelines/resources/rhdh-operator/rhdh-start.yaml b/.ibm/pipelines/resources/rhdh-operator/rhdh-start.yaml index 0beec531e9..823e07a749 100644 --- a/.ibm/pipelines/resources/rhdh-operator/rhdh-start.yaml +++ b/.ibm/pipelines/resources/rhdh-operator/rhdh-start.yaml @@ -24,3 +24,18 @@ spec: secrets: - name: rhdh-secrets - name: redis-secret + deployment: + patch: + spec: + template: + spec: + volumes: + - name: dynamic-plugins-root + ephemeral: + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi From c6a92033635b2fb50e8bfd1d2df35d9506f5a8fe Mon Sep 17 00:00:00 2001 From: Chad Crum Date: Thu, 18 Dec 2025 05:42:51 -0500 Subject: [PATCH 12/12] fix(e2e): increase Keycloak login timeout to 30 seconds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The default 10-second actionTimeout was being exceeded when the Keycloak popup was slow to render, causing orchestrator RBAC tests to fail during authentication setup. Add explicit waitFor with 30-second timeout before interacting with the Keycloak login form to handle slow responses. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- e2e-tests/playwright/utils/common.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/e2e-tests/playwright/utils/common.ts b/e2e-tests/playwright/utils/common.ts index 8b20bafdc5..6be5f861c8 100644 --- a/e2e-tests/playwright/utils/common.ts +++ b/e2e-tests/playwright/utils/common.ts @@ -84,6 +84,10 @@ export class Common { await new Promise((resolve) => { this.page.once("popup", async (popup) => { await popup.waitForLoadState(); + // Wait for login button to be visible with longer timeout for slow Keycloak responses + await popup + .locator("#kc-login") + .waitFor({ state: "visible", timeout: 30000 }); await popup.locator("#username").fill(userid); await popup.locator("#password").fill(password); await popup.locator("#kc-login").click();