@@ -1,11 +1,15 @@
+import { S3Client } from "@aws-sdk/client-s3";
+import { Upload } from "@aws-sdk/lib-storage";
 import { type DataFunctionArgs } from "@remix-run/node";
-import fs from "fs";
-import os from "os";
-import path from "path";
-import { PassThrough } from "stream";
 import v8 from "v8";
 import { prisma } from "~/db.server";
 import { authenticateApiRequestWithPersonalAccessToken } from "~/services/personalAccessToken.server";
+import { logger } from "~/services/logger.server";
+
+// Use 100MB parts for faster parallel uploads of large snapshots
+const PART_SIZE = 100 * 1024 * 1024;
+// Use high parallelism to maximize upload speed
+const QUEUE_SIZE = 8;
 
 // Format date as yyyy-MM-dd HH_mm_ss_SSS
 function formatDate(date: Date) {
@@ -24,6 +28,32 @@ function formatDate(date: Date) {
     .padStart(2, "0")}_${milliseconds.toString().padStart(3, "0")}`;
 }
 
+function getS3Config() {
+  const bucket = process.env.SNAPSHOT_S3_BUCKET;
+  const region = process.env.SNAPSHOT_S3_REGION ?? "us-east-1";
+
+  if (!bucket) {
+    return undefined;
+  }
+
+  // Optional - only needed for non-AWS S3 (MinIO, R2, etc.) or local dev
+  const endpoint = process.env.SNAPSHOT_S3_ENDPOINT;
+  const accessKeyId = process.env.SNAPSHOT_S3_ACCESS_KEY_ID;
+  const secretAccessKey = process.env.SNAPSHOT_S3_SECRET_ACCESS_KEY;
+
+  // If explicit credentials provided, use them (local dev / non-AWS)
+  // Otherwise, SDK uses default credential chain (IAM role, env vars, etc.)
+  const credentials =
+    accessKeyId && secretAccessKey ? { accessKeyId, secretAccessKey } : undefined;
+
+  return {
+    bucket,
+    region,
+    endpoint,
+    credentials,
+  };
+}
+
 export async function loader({ request }: DataFunctionArgs) {
   const authenticationResult = await authenticateApiRequestWithPersonalAccessToken(request);
 
@@ -41,31 +71,94 @@ export async function loader({ request }: DataFunctionArgs) {
     throw new Response("You must be an admin to perform this action", { status: 403 });
   }
 
-  const tempDir = os.tmpdir();
-  const filepath = path.join(
-    tempDir,
-    `${getTaskIdentifier()}-${formatDate(new Date())}.heapsnapshot`
-  );
+  const s3Config = getS3Config();
 
-  const snapshotPath = v8.writeHeapSnapshot(filepath);
-  if (!snapshotPath) {
-    throw new Response("No snapshot saved", { status: 500 });
+  if (!s3Config) {
+    throw new Response(
78+ "S3 is not configured. Set SNAPSHOT_S3_ENDPOINT, SNAPSHOT_S3_BUCKET, SNAPSHOT_S3_ACCESS_KEY_ID, and SNAPSHOT_S3_SECRET_ACCESS_KEY." ,
+      { status: 500 }
+    );
   }
 
-  const body = new PassThrough();
-  const stream = fs.createReadStream(snapshotPath);
-  stream.on("open", () => stream.pipe(body));
-  stream.on("error", (err) => body.end(err));
-  stream.on("end", () => body.end());
-
-  return new Response(body as any, {
-    status: 200,
-    headers: {
-      "Content-Type": "application/octet-stream",
-      "Content-Disposition": `attachment; filename="${path.basename(snapshotPath)}"`,
-      "Content-Length": (await fs.promises.stat(snapshotPath)).size.toString(),
-    },
+  const s3Client = new S3Client({
+    region: s3Config.region,
+    ...(s3Config.credentials && { credentials: s3Config.credentials }),
+    ...(s3Config.endpoint && { endpoint: s3Config.endpoint, forcePathStyle: true }),
+  });
+
+  const filename = `${getTaskIdentifier()}-${formatDate(new Date())}.heapsnapshot`;
+  const s3Key = `snapshots/${filename}`;
+
+  logger.info("Taking heap snapshot and streaming to S3", {
+    bucket: s3Config.bucket,
+    key: s3Key,
   });
+
+  try {
+    const startTime = Date.now();
+    const snapshotStream = v8.getHeapSnapshot();
+
+    const upload = new Upload({
+      client: s3Client,
+      params: {
+        Bucket: s3Config.bucket,
+        Key: s3Key,
+        Body: snapshotStream,
+        ContentType: "application/octet-stream",
+      },
+      queueSize: QUEUE_SIZE,
+      partSize: PART_SIZE,
+      leavePartsOnError: false,
+    });
+
+    let totalBytes = 0;
+    upload.on("httpUploadProgress", (progress) => {
+      totalBytes = progress.loaded ?? totalBytes;
+      logger.info("Upload progress", {
+        loaded: progress.loaded,
+        part: progress.part,
+      });
+    });
+
+    await upload.done();
+    const duration = Date.now() - startTime;
+
+    logger.info("Heap snapshot uploaded to S3", {
+      bucket: s3Config.bucket,
+      key: s3Key,
+      durationMs: duration,
+      durationSec: Math.round(duration / 1000),
+      totalBytes,
+      uploadSpeedMBps: totalBytes > 0 ? Math.round((totalBytes / 1024 / 1024 / (duration / 1000)) * 10) / 10 : 0,
+    });
+
+    return new Response(
+      JSON.stringify({
+        success: true,
+        bucket: s3Config.bucket,
+        key: s3Key,
+        sizeBytes: totalBytes,
+        durationMs: duration,
+      }),
+      {
+        status: 200,
+        headers: {
+          "Content-Type": "application/json",
+        },
+      }
+    );
+  } catch (error) {
+    logger.error("Failed to upload heap snapshot to S3", {
+      error: error instanceof Error ? error.message : String(error),
+      bucket: s3Config.bucket,
+      key: s3Key,
+    });
+
+    throw new Response(
+      `Failed to upload snapshot to S3: ${error instanceof Error ? error.message : String(error)}`,
+      { status: 500 }
+    );
+  }
 }
 
 function getTaskIdentifier() {
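
Note: with this change the endpoint no longer streams the snapshot file back in the HTTP response; it returns JSON with the bucket and key, and the file itself has to be pulled from S3 afterwards. A minimal retrieval sketch, assuming the same SNAPSHOT_S3_* variables used above and the "key" field from the JSON response (the helper name and output path are illustrative, not part of this change):

import { createWriteStream } from "fs";
import { pipeline } from "stream/promises";
import { Readable } from "stream";
import { GetObjectCommand, S3Client } from "@aws-sdk/client-s3";

// Assumptions: SNAPSHOT_S3_BUCKET / SNAPSHOT_S3_REGION match the server config,
// and `key` is the "key" value returned by the loader's JSON response.
async function downloadSnapshot(key: string, outFile: string) {
  const client = new S3Client({ region: process.env.SNAPSHOT_S3_REGION ?? "us-east-1" });
  const { Body } = await client.send(
    new GetObjectCommand({ Bucket: process.env.SNAPSHOT_S3_BUCKET, Key: key })
  );
  if (!Body) throw new Error(`No object body returned for ${key}`);
  // In Node the returned Body is a Readable stream; pipe it straight to disk.
  await pipeline(Body as Readable, createWriteStream(outFile));
}

// e.g. downloadSnapshot("snapshots/<taskIdentifier>-<timestamp>.heapsnapshot", "./heap.heapsnapshot");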