fix(webapp): auto-recover replication services after stream errors #3613
base: main
Changes from all commits: 8db3b86, db87b8d, ebef65d, 9fdc145, 5944ff6
New changeset:

```
---
area: webapp
type: fix
---

Runs and sessions replication services now auto-recover from stream errors (e.g. after a Postgres failover) instead of silently leaving replication stopped. Behaviour is configurable per service — reconnect (default), exit so a process supervisor can restart the host, or log.
```
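For orientation, here is a minimal sketch of selecting each mode with the `strategyFromEnv` helper the PR adds (full source below). The snippet is illustrative rather than code from the PR; the delay values match the defaults named in the new file.

```ts
import { strategyFromEnv } from "./replicationErrorRecovery.server";

// Default: re-subscribe in-process with exponential backoff, starting at 1s
// and doubling up to a 60s cap; maxAttempts of 0 (or unset) retries forever.
const reconnect = strategyFromEnv({ strategy: "reconnect", reconnectMaxAttempts: 0 });

// Supervisor mode: log the error, wait 5s, then exit(1) so Docker/ECS/
// systemd/k8s replaces the process with a clean slate.
const exit = strategyFromEnv({ strategy: "exit", exitDelayMs: 5_000, exitCode: 1 });

// Historical no-op: leave replication stopped, useful mainly for observing
// the silent-death failure mode in tests.
const log = strategyFromEnv({ strategy: "log" });
```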
New file `replicationErrorRecovery.server.ts`:

```ts
import { Logger } from "@trigger.dev/core/logger";

// When the LogicalReplicationClient's WAL stream errors (e.g. after a
// Postgres failover) it calls stop() on itself and stays stopped. The host
// service has to decide how to recover. Three strategies are available:
//
// - "reconnect" — re-subscribe in-process with exponential backoff. Default;
//   works without a process supervisor.
// - "exit" — exit the process so an external supervisor (Docker
//   restart=always, ECS, systemd, k8s, ...) replaces it. Recommended when a
//   supervisor is present because it gets a clean slate every time.
// - "log" — preserve the historical no-op behaviour. Useful for
//   debugging or in test environments where you want to observe the
//   silent-death failure mode.
export type ReplicationErrorRecoveryStrategy =
  | {
      type: "reconnect";
      initialDelayMs?: number;
      maxDelayMs?: number;
      // 0 (or undefined) means retry forever.
      maxAttempts?: number;
    }
  | {
      type: "exit";
      exitDelayMs?: number;
      exitCode?: number;
    }
  | { type: "log" };

export type ReplicationErrorRecoveryDeps = {
  strategy: ReplicationErrorRecoveryStrategy;
  logger: Logger;
  // Re-subscribe the underlying replication client. Implementations should
  // call client.subscribe(...) and resolve once the stream is started.
  reconnect: () => Promise<void>;
  // True once the host service has begun graceful shutdown — recovery
  // suppresses all work in that state.
  isShuttingDown: () => boolean;
};

export type ReplicationErrorRecovery = {
  // Called from the replication client's "error" event handler.
  handle(error: unknown): void;
  // Called from the replication client's "start" event handler. Resets the
  // reconnect attempt counter so the next failure starts from initialDelayMs.
  notifyStreamStarted(): void;
  // Called from the replication client's "leaderElection" event handler with
  // isLeader=false. Only the reconnect strategy acts on this; exit and log
  // strategies treat losing the lock as a normal multi-instance state (an
  // "exit" instance would otherwise restart-loop whenever a peer holds it).
  notifyLeaderElectionLost(error: unknown): void;
  // Cancel any pending reconnect/exit timer. Called from shutdown().
  dispose(): void;
};

export function createReplicationErrorRecovery(
  deps: ReplicationErrorRecoveryDeps
): ReplicationErrorRecovery {
  const { strategy, logger, reconnect, isShuttingDown } = deps;
  let attempt = 0;
  let pendingReconnect: NodeJS.Timeout | null = null;
  let pendingExit: NodeJS.Timeout | null = null;
  let exiting = false;

  function scheduleReconnect(error: unknown): void {
    if (strategy.type !== "reconnect") return;
    if (pendingReconnect) return;

    attempt += 1;
    const maxAttempts = strategy.maxAttempts ?? 0;
    if (maxAttempts > 0 && attempt > maxAttempts) {
      logger.error("Replication reconnect exceeded maxAttempts; giving up", {
        attempt,
        maxAttempts,
        error,
      });
      return;
    }

    const initialDelay = strategy.initialDelayMs ?? 1_000;
    const maxDelay = strategy.maxDelayMs ?? 60_000;
    const delay = Math.min(initialDelay * Math.pow(2, attempt - 1), maxDelay);

    logger.error("Replication stream lost — scheduling reconnect", {
      attempt,
      delayMs: delay,
      error,
    });

    pendingReconnect = setTimeout(async () => {
      pendingReconnect = null;
      if (isShuttingDown()) return;

      try {
        await reconnect();
        // Success path is handled by notifyStreamStarted, which fires from
        // the replication client's "start" event after the stream is live.
      } catch (err) {
        // subscribe() can throw without first emitting an "error" event —
        // notably when the initial pg client.connect() fails because Postgres
        // is still unreachable mid-failover. Schedule the next attempt
        // ourselves so recovery doesn't silently stop. If subscribe() did
        // also emit an "error" event, handle() will call scheduleReconnect()
        // first; the guard on pendingReconnect makes this idempotent.
        logger.error("Replication reconnect attempt failed", {
          attempt,
          error: err,
        });
        scheduleReconnect(err);
      }
    }, delay);
  }

  function scheduleExit(): void {
    if (strategy.type !== "exit") return;
    if (exiting) return;
    exiting = true;

    const delay = strategy.exitDelayMs ?? 5_000;
    const code = strategy.exitCode ?? 1;

    logger.error("Fatal replication error — exiting to let process supervisor restart", {
      exitCode: code,
      exitDelayMs: delay,
    });

    pendingExit = setTimeout(() => {
      // eslint-disable-next-line no-process-exit
      process.exit(code);
    }, delay);
    // Don't hold a clean shutdown back on this timer.
    pendingExit.unref();
  }

  return {
    handle(error) {
      if (isShuttingDown()) return;
      switch (strategy.type) {
        case "log":
          return;
        case "exit":
          return scheduleExit();
        case "reconnect":
          return scheduleReconnect(error);
      }
    },
    notifyStreamStarted() {
      if (attempt > 0) {
        logger.info("Replication reconnect succeeded", { attempt });
        attempt = 0;
      }
    },
    notifyLeaderElectionLost(error) {
      if (isShuttingDown()) return;
      // Only the reconnect strategy should react. For exit, losing the
      // lock to a peer would otherwise trigger a restart loop. For log,
      // we keep historical no-op semantics.
      if (strategy.type !== "reconnect") return;
      scheduleReconnect(error);
    },
    dispose() {
      if (pendingReconnect) {
        clearTimeout(pendingReconnect);
        pendingReconnect = null;
      }
      if (pendingExit) {
        clearTimeout(pendingExit);
        pendingExit = null;
      }
    },
  };
}

// Shape of the env-driven configuration object the instance bootstrap files
// build from process.env. Kept separate from the strategy union above so the
// instance code can pass a single object regardless of which strategy is set.
export type ReplicationErrorRecoveryEnv = {
  strategy: "reconnect" | "exit" | "log";
  reconnectInitialDelayMs?: number;
  reconnectMaxDelayMs?: number;
  reconnectMaxAttempts?: number;
  exitDelayMs?: number;
  exitCode?: number;
};

export function strategyFromEnv(
  env: ReplicationErrorRecoveryEnv
): ReplicationErrorRecoveryStrategy {
  switch (env.strategy) {
    case "exit":
      return {
        type: "exit",
        exitDelayMs: env.exitDelayMs,
        exitCode: env.exitCode,
      };
    case "log":
      return { type: "log" };
    case "reconnect":
    default:
      return {
        type: "reconnect",
        initialDelayMs: env.reconnectInitialDelayMs,
        maxDelayMs: env.reconnectMaxDelayMs,
        maxAttempts: env.reconnectMaxAttempts,
      };
  }
}
```
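To see how the pieces fit, here is a minimal wiring sketch for a host service. The event names ("error", "start", "leaderElection") come from the handler comments above, but the client shape, the `subscribe()` signature, and the `Logger` construction are assumptions rather than code from the PR.

```ts
import { Logger } from "@trigger.dev/core/logger";
import {
  createReplicationErrorRecovery,
  strategyFromEnv,
} from "./replicationErrorRecovery.server";

// Assumed stand-in for the service's LogicalReplicationClient.
interface ReplicationClientLike {
  subscribe(): Promise<void>;
  on(event: "error", cb: (error: unknown) => void): void;
  on(event: "start", cb: () => void): void;
  on(event: "leaderElection", cb: (isLeader: boolean) => void): void;
}
declare const client: ReplicationClientLike;

const logger = new Logger("replication");
let shuttingDown = false;

const recovery = createReplicationErrorRecovery({
  strategy: strategyFromEnv({ strategy: "reconnect" }),
  logger,
  // Re-subscribe the stream; resolves once it has started.
  reconnect: () => client.subscribe(),
  isShuttingDown: () => shuttingDown,
});

client.on("error", (error) => recovery.handle(error));
client.on("start", () => recovery.notifyStreamStarted());
client.on("leaderElection", (isLeader) => {
  if (!isLeader) recovery.notifyLeaderElectionLost(new Error("leader lock lost"));
});

// Graceful shutdown: suppress recovery first, then cancel pending timers.
function shutdown() {
  shuttingDown = true;
  recovery.dispose();
}
```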
Runs replication instance bootstrap:

```diff
@@ -3,6 +3,7 @@ import invariant from "tiny-invariant";
 import { env } from "~/env.server";
 import { singleton } from "~/utils/singleton";
 import { meter, provider } from "~/v3/tracer.server";
+import { strategyFromEnv } from "./replicationErrorRecovery.server";
 import { RunsReplicationService } from "./runsReplicationService.server";
 import { signalsEmitter } from "./signals.server";

@@ -69,6 +70,14 @@ function initializeRunsReplicationInstance() {
     insertStrategy: env.RUN_REPLICATION_INSERT_STRATEGY,
     disablePayloadInsert: env.RUN_REPLICATION_DISABLE_PAYLOAD_INSERT === "1",
     disableErrorFingerprinting: env.RUN_REPLICATION_DISABLE_ERROR_FINGERPRINTING === "1",
+    errorRecovery: strategyFromEnv({
+      strategy: env.RUN_REPLICATION_ERROR_STRATEGY,
+      reconnectInitialDelayMs: env.REPLICATION_RECONNECT_INITIAL_DELAY_MS,
+      reconnectMaxDelayMs: env.REPLICATION_RECONNECT_MAX_DELAY_MS,
+      reconnectMaxAttempts: env.REPLICATION_RECONNECT_MAX_ATTEMPTS,
+      exitDelayMs: env.RUN_REPLICATION_EXIT_DELAY_MS,
+      exitCode: env.RUN_REPLICATION_EXIT_CODE,
+    }),
   });

   if (env.RUN_REPLICATION_ENABLED === "1") {
```
Contributor comment: 🚩 Initial `start()` failure from pg connection error bypasses error recovery. (Refers to lines 83-93.)
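The diff also reads several new keys off `env` (`RUN_REPLICATION_ERROR_STRATEGY`, the shared `REPLICATION_RECONNECT_*` knobs, and the exit settings), so `~/env.server` needs matching declarations. Those changes are not shown in this excerpt; the sketch below assumes a zod-style schema with illustrative constraints, neither of which is confirmed by the PR.

```ts
import { z } from "zod";

// Hypothetical schema entries for the keys the runs instance reads above.
// The real ~/env.server changes are not part of this excerpt.
const ReplicationRecoveryEnv = z.object({
  RUN_REPLICATION_ERROR_STRATEGY: z.enum(["reconnect", "exit", "log"]).default("reconnect"),
  REPLICATION_RECONNECT_INITIAL_DELAY_MS: z.coerce.number().int().positive().optional(),
  REPLICATION_RECONNECT_MAX_DELAY_MS: z.coerce.number().int().positive().optional(),
  REPLICATION_RECONNECT_MAX_ATTEMPTS: z.coerce.number().int().min(0).optional(),
  RUN_REPLICATION_EXIT_DELAY_MS: z.coerce.number().int().min(0).optional(),
  RUN_REPLICATION_EXIT_CODE: z.coerce.number().int().optional(),
});
```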
Sessions replication instance bootstrap:

```diff
@@ -3,6 +3,7 @@ import invariant from "tiny-invariant";
 import { env } from "~/env.server";
 import { singleton } from "~/utils/singleton";
 import { meter, provider } from "~/v3/tracer.server";
+import { strategyFromEnv } from "./replicationErrorRecovery.server";
 import { SessionsReplicationService } from "./sessionsReplicationService.server";

 export const sessionsReplicationInstance = singleton(

@@ -66,6 +67,14 @@ function initializeSessionsReplicationInstance() {
     insertBaseDelayMs: env.SESSION_REPLICATION_INSERT_BASE_DELAY_MS,
     insertMaxDelayMs: env.SESSION_REPLICATION_INSERT_MAX_DELAY_MS,
     insertStrategy: env.SESSION_REPLICATION_INSERT_STRATEGY,
+    errorRecovery: strategyFromEnv({
+      strategy: env.SESSION_REPLICATION_ERROR_STRATEGY,
+      reconnectInitialDelayMs: env.REPLICATION_RECONNECT_INITIAL_DELAY_MS,
+      reconnectMaxDelayMs: env.REPLICATION_RECONNECT_MAX_DELAY_MS,
+      reconnectMaxAttempts: env.REPLICATION_RECONNECT_MAX_ATTEMPTS,
+      exitDelayMs: env.SESSION_REPLICATION_EXIT_DELAY_MS,
+      exitCode: env.SESSION_REPLICATION_EXIT_CODE,
+    }),
   });

   return service;
```

Contributor comment on lines +70 to 78: 🚩 Sessions replication service is created with error recovery but never started in the instance file.
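For comparison, the runs instance guards its start behind `RUN_REPLICATION_ENABLED === "1"`. A minimal sketch of the equivalent guard the flag above asks for, assuming a hypothetical `SESSION_REPLICATION_ENABLED` key and a `start()` method on the service, neither of which appears in this excerpt:

```ts
// Hypothetical sketch, to run before `return service;` above: mirrors the
// runs instance's enable guard. Both the env key and the start() call are
// assumptions, not code from this PR.
if (env.SESSION_REPLICATION_ENABLED === "1") {
  service.start();
}
```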