From 4ce68a928cf8cccaa4686310529c822d2607dbab Mon Sep 17 00:00:00 2001
From: Adam Spofford
Date: Mon, 2 Feb 2026 08:06:16 -0800
Subject: [PATCH 1/5] Implement snapshot uploading/downloading

---
 .claude/CLAUDE.md                             |   4 +-
 .github/scripts/provision-linux-test.sh       |   2 +-
 .github/scripts/provision-macos-test.sh       |   2 +-
 CHANGELOG.md                                  |   2 +-
 Cargo.lock                                    |   1 +
 Cargo.toml                                    |   1 +
 README.md                                     |   4 +-
 crates/icp-cli/Cargo.toml                     |   1 +
 .../commands/canister/snapshot/download.rs    | 211 +++++
 .../src/commands/canister/snapshot/mod.rs     |   6 +
 .../src/commands/canister/snapshot/upload.rs  | 209 +++++
 crates/icp-cli/src/main.rs                    |  12 +
 crates/icp-cli/src/operations/mod.rs          |   1 +
 .../src/operations/snapshot_transfer.rs       | 880 ++++++++++++++++++
 crates/icp-cli/tests/assets/limit_transfer.py |  62 ++
 .../icp-cli/tests/canister_snapshot_tests.rs  | 711 ++++++++++++++
 16 files changed, 2102 insertions(+), 7 deletions(-)
 create mode 100644 crates/icp-cli/src/commands/canister/snapshot/download.rs
 create mode 100644 crates/icp-cli/src/commands/canister/snapshot/upload.rs
 create mode 100644 crates/icp-cli/src/operations/snapshot_transfer.rs
 create mode 100644 crates/icp-cli/tests/assets/limit_transfer.py

diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md
index 8ae9da43..69d8c23e 100644
--- a/.claude/CLAUDE.md
+++ b/.claude/CLAUDE.md
@@ -26,8 +26,8 @@ cargo test
 # Run tests for specific package
 cargo test -p icp-cli
 
-# Run a specific test
-cargo test <test_name>
+# Run a specific test from <file>.rs
+cargo test --test <file> -- <test_name>
 
 # Run with verbose output
 cargo test -- --nocapture
diff --git a/.github/scripts/provision-linux-test.sh b/.github/scripts/provision-linux-test.sh
index b0622ce9..93ed67f5 100755
--- a/.github/scripts/provision-linux-test.sh
+++ b/.github/scripts/provision-linux-test.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 set -euo pipefail
-sudo apt-get update && sudo apt-get install -y softhsm2
+sudo apt-get update && sudo apt-get install -y softhsm2 mitmproxy
diff --git a/.github/scripts/provision-macos-test.sh b/.github/scripts/provision-macos-test.sh
index 9d0f3730..c263885a 100755
--- a/.github/scripts/provision-macos-test.sh
+++ b/.github/scripts/provision-macos-test.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 set -euo pipefail
-brew install softhsm
+brew install softhsm mitmproxy
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7409f6a9..bc93c2ae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,6 @@
 # Unreleased
 
-* feat: `icp canister snapshot` - create, delete, restore, list canister snapshots
+* feat: `icp canister snapshot` - create, delete, restore, list, download, and upload canister snapshots
 
 # v0.1.0-beta.6
 
diff --git a/Cargo.lock b/Cargo.lock
index a6cb77a0..e287b198 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3209,6 +3209,7 @@ dependencies = [
 "anyhow",
 "assert_cmd",
 "async-trait",
+ "backoff",
 "bigdecimal",
 "bip32",
 "byte-unit",
diff --git a/Cargo.toml b/Cargo.toml
index 45d3ccdb..b6e95a5e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,6 +18,7 @@ publish = false
 anyhow = "1.0.100"
 async-dropper = { version = "0.3.0", features = ["tokio", "simple"] }
 async-trait = "0.1.88"
+backoff = { version = "0.4", features = ["tokio"] }
 bigdecimal = "0.4.10"
 bip32 = "0.5.0"
 bollard = "0.19.4"
diff --git a/README.md b/README.md
index 40b301f4..a847ab2f 100644
--- a/README.md
+++ b/README.md
@@ -49,8 +49,6 @@ Contributions are welcome! See [CONTRIBUTING.md](.github/CONTRIBUTING.md) for details.
 ### Prerequisites
 
 - Rust 1.88.0+ ([rustup.rs](https://rustup.rs/))
-- `wasm-tools` — Install via `cargo install wasm-tools` (required for test suite)
-- Platform dependencies:
 
 | Platform      | Install                                                                                                    |
 |---------------|----------------------------------------------------------------------------------------------------------|
@@ -60,6 +58,8 @@ Contributions are welcome! See [CONTRIBUTING.md](.github/CONTRIBUTING.md) for details.
 | Arch Linux    | `sudo pacman -S base-devel openssl`                                                                        |
 | Windows       | VS build tools (see [Rustup's guide](https://rust-lang.github.io/rustup/installation/windows-msvc.html))   |
 
+Tests additionally depend on `wasm-tools`, `mitmproxy`, and SoftHSM2.
+
 ### Build and Test
 
 ```bash
diff --git a/crates/icp-cli/Cargo.toml b/crates/icp-cli/Cargo.toml
index 6607f6ce..d083b05a 100644
--- a/crates/icp-cli/Cargo.toml
+++ b/crates/icp-cli/Cargo.toml
@@ -14,6 +14,7 @@ path = "src/main.rs"
 anstyle = "1.0.13"
 anyhow.workspace = true
 async-trait.workspace = true
+backoff.workspace = true
 bigdecimal.workspace = true
 bip32.workspace = true
 byte-unit.workspace = true
diff --git a/crates/icp-cli/src/commands/canister/snapshot/download.rs b/crates/icp-cli/src/commands/canister/snapshot/download.rs
new file mode 100644
index 00000000..4f0bd1a2
--- /dev/null
+++ b/crates/icp-cli/src/commands/canister/snapshot/download.rs
@@ -0,0 +1,211 @@
+use byte_unit::{Byte, UnitType};
+use clap::Args;
+use icp::context::Context;
+use icp::prelude::*;
+
+use super::SnapshotId;
+use crate::commands::args;
+use crate::operations::misc::format_timestamp;
+use crate::operations::snapshot_transfer::{
+    BlobType, SnapshotPaths, SnapshotTransferError, create_transfer_progress_bar,
+    delete_download_progress, download_blob_to_file, download_wasm_chunk, load_download_progress,
+    load_metadata, read_snapshot_metadata, save_metadata,
+};
+
+#[derive(Debug, Args)]
+pub(crate) struct DownloadArgs {
+    #[command(flatten)]
+    pub(crate) cmd_args: args::CanisterCommandArgs,
+
+    /// The snapshot ID to download (hex-encoded)
+    snapshot_id: SnapshotId,
+
+    /// Output directory for the snapshot files
+    #[arg(long, short = 'o')]
+    output: PathBuf,
+
+    /// Resume a previously interrupted download
+    #[arg(long)]
+    resume: bool,
+}
+
+pub(crate) async fn exec(ctx: &Context, args: &DownloadArgs) -> Result<(), anyhow::Error> {
+    let selections = args.cmd_args.selections();
+
+    let agent = ctx
+        .get_agent(
+            &selections.identity,
+            &selections.network,
+            &selections.environment,
+        )
+        .await?;
+    let cid = ctx
+        .get_canister_id(
+            &selections.canister,
+            &selections.network,
+            &selections.environment,
+        )
+        .await?;
+
+    let name = &args.cmd_args.canister;
+    let snapshot_id = &args.snapshot_id.0;
+
+    // Open or create the snapshot directory with a lock
+    let snapshot_dir = SnapshotPaths::new(args.output.clone())?;
+
+    snapshot_dir
+        .with_write(async |paths| {
+            // Ensure directories exist
+            paths.ensure_dirs()?;
+
+            // Check if we should resume or start fresh
+            let metadata = if args.resume && paths.metadata_path().exists() {
+                ctx.term.write_line("Resuming previous download...")?;
+                load_metadata(paths)?
+            } else if !args.resume {
+                // Check if directory has existing files (besides lock)
+                let has_files = paths.metadata_path().exists()
+                    || paths.wasm_module_path().exists()
+                    || paths.wasm_memory_path().exists()
+                    || paths.stable_memory_path().exists();
+
+                if has_files {
+                    return Err(SnapshotTransferError::DirectoryNotEmpty {
+                        path: args.output.clone(),
+                    }
+                    .into());
+                }
+
+                // Fetch metadata from canister
+                ctx.term.write_line(&format!(
+                    "Downloading snapshot {id} from canister {name} ({cid})",
+                    id = hex::encode(snapshot_id),
+                ))?;
+
+                let metadata = read_snapshot_metadata(&agent, cid, snapshot_id).await?;
+
+                ctx.term.write_line(&format!(
+                    " Timestamp: {}",
+                    format_timestamp(metadata.taken_at_timestamp)
+                ))?;
+
+                let total_size = metadata.wasm_module_size
+                    + metadata.wasm_memory_size
+                    + metadata.stable_memory_size;
+                ctx.term.write_line(&format!(
+                    " Total size: {}",
+                    Byte::from_u64(total_size).get_appropriate_unit(UnitType::Binary)
+                ))?;
+
+                // Save metadata
+                save_metadata(&metadata, paths)?;
+
+                metadata
+            } else {
+                return Err(SnapshotTransferError::NoExistingDownload {
+                    path: args.output.clone(),
+                }
+                .into());
+            };
+
+            // Load download progress (handles gaps from previous interrupted downloads)
+            let mut progress = load_download_progress(paths)?;
+
+            // Download WASM module
+            if metadata.wasm_module_size > 0 {
+                if !progress.wasm_module.is_complete(metadata.wasm_module_size) {
+                    let pb = create_transfer_progress_bar(metadata.wasm_module_size, "WASM module");
+                    download_blob_to_file(
+                        &agent,
+                        cid,
+                        snapshot_id,
+                        BlobType::WasmModule,
+                        metadata.wasm_module_size,
+                        paths,
+                        &mut progress,
+                        &pb,
+                    )
+                    .await?;
+                    pb.finish_with_message("done");
+                } else {
+                    ctx.term.write_line("WASM module: already complete")?;
+                }
+            }
+
+            // Download WASM memory
+            if metadata.wasm_memory_size > 0 {
+                if !progress.wasm_memory.is_complete(metadata.wasm_memory_size) {
+                    let pb = create_transfer_progress_bar(metadata.wasm_memory_size, "WASM memory");
+                    download_blob_to_file(
+                        &agent,
+                        cid,
+                        snapshot_id,
+                        BlobType::WasmMemory,
+                        metadata.wasm_memory_size,
+                        paths,
+                        &mut progress,
+                        &pb,
+                    )
+                    .await?;
+                    pb.finish_with_message("done");
+                } else {
+                    ctx.term.write_line("WASM memory: already complete")?;
+                }
+            }
+
+            // Download stable memory
+            if metadata.stable_memory_size > 0 {
+                if !progress
+                    .stable_memory
+                    .is_complete(metadata.stable_memory_size)
+                {
+                    let pb =
+                        create_transfer_progress_bar(metadata.stable_memory_size, "Stable memory");
+                    download_blob_to_file(
+                        &agent,
+                        cid,
+                        snapshot_id,
+                        BlobType::StableMemory,
+                        metadata.stable_memory_size,
+                        paths,
+                        &mut progress,
+                        &pb,
+                    )
+                    .await?;
+                    pb.finish_with_message("done");
+                } else {
+                    ctx.term.write_line("Stable memory: already complete")?;
+                }
+            } else {
+                // Create empty stable memory file
+                icp::fs::write(&paths.stable_memory_path(), &[])?;
+            }
+
+            // Download WASM chunk store
+            if !metadata.wasm_chunk_store.is_empty() {
+                ctx.term.write_line(&format!(
+                    "Downloading {} WASM chunks...",
+                    metadata.wasm_chunk_store.len()
+                ))?;
+
+                for chunk_hash in &metadata.wasm_chunk_store {
+                    let chunk_path = paths.wasm_chunk_path(&chunk_hash.hash);
+                    if !chunk_path.exists() {
+                        download_wasm_chunk(&agent, cid, snapshot_id, chunk_hash, paths).await?;
+                    }
+                }
+                ctx.term.write_line("WASM chunks: done")?;
+            }
+
+            // Clean up progress file on success
+            delete_download_progress(paths)?;
+
+            ctx.term
+                .write_line(&format!("Snapshot downloaded to {}", args.output))?;
+
+            Ok::<_, anyhow::Error>(())
+        })
+        .await??;
+
+    Ok(())
+}
diff --git a/crates/icp-cli/src/commands/canister/snapshot/mod.rs b/crates/icp-cli/src/commands/canister/snapshot/mod.rs
index b82db8d9..04b18749 100644
--- a/crates/icp-cli/src/commands/canister/snapshot/mod.rs
+++ b/crates/icp-cli/src/commands/canister/snapshot/mod.rs
@@ -4,8 +4,10 @@ use clap::Subcommand;
 
 pub(crate) mod create;
 pub(crate) mod delete;
+pub(crate) mod download;
 pub(crate) mod list;
 pub(crate) mod restore;
+pub(crate) mod upload;
 
 #[derive(Subcommand, Debug)]
 pub(crate) enum Command {
@@ -13,10 +15,14 @@
     Create(create::CreateArgs),
     /// Delete a canister snapshot
     Delete(delete::DeleteArgs),
+    /// Download a snapshot to local disk
+    Download(download::DownloadArgs),
     /// List all snapshots for a canister
     List(list::ListArgs),
     /// Restore a canister from a snapshot
     Restore(restore::RestoreArgs),
+    /// Upload a snapshot from local disk
+    Upload(upload::UploadArgs),
 }
 
 /// A hex-encoded snapshot ID.
diff --git a/crates/icp-cli/src/commands/canister/snapshot/upload.rs b/crates/icp-cli/src/commands/canister/snapshot/upload.rs
new file mode 100644
index 00000000..9d4e55f9
--- /dev/null
+++ b/crates/icp-cli/src/commands/canister/snapshot/upload.rs
@@ -0,0 +1,209 @@
+use byte_unit::{Byte, UnitType};
+use clap::Args;
+use icp::context::Context;
+use icp::prelude::*;
+
+use super::SnapshotId;
+use crate::commands::args;
+use crate::operations::misc::format_timestamp;
+use crate::operations::snapshot_transfer::{
+    BlobType, SnapshotPaths, SnapshotTransferError, UploadProgress, create_transfer_progress_bar,
+    delete_upload_progress, load_metadata, load_upload_progress, save_upload_progress,
+    upload_blob_from_file, upload_snapshot_metadata, upload_wasm_chunk,
+};
+
+#[derive(Debug, Args)]
+pub(crate) struct UploadArgs {
+    #[command(flatten)]
+    pub(crate) cmd_args: args::CanisterCommandArgs,
+
+    /// Input directory containing the snapshot files
+    #[arg(long, short = 'i')]
+    input: PathBuf,
+
+    /// Replace an existing snapshot instead of creating a new one
+    #[arg(long)]
+    replace: Option<SnapshotId>,
+
+    /// Resume a previously interrupted upload
+    #[arg(long)]
+    resume: bool,
+}
+
+pub(crate) async fn exec(ctx: &Context, args: &UploadArgs) -> Result<(), anyhow::Error> {
+    let selections = args.cmd_args.selections();
+
+    let agent = ctx
+        .get_agent(
+            &selections.identity,
+            &selections.network,
+            &selections.environment,
+        )
+        .await?;
+    let cid = ctx
+        .get_canister_id(
+            &selections.canister,
+            &selections.network,
+            &selections.environment,
+        )
+        .await?;
+
+    let name = &args.cmd_args.canister;
+
+    // Open the snapshot directory with a lock
+    let snapshot_dir = SnapshotPaths::new(args.input.clone())?;
+
+    let snapshot_id = snapshot_dir
+        .with_write(async |paths| {
+            // Load metadata
+            let metadata = load_metadata(paths)?;
+
+            ctx.term
+                .write_line(&format!("Uploading snapshot to canister {name} ({cid})",))?;
+            ctx.term.write_line(&format!(
+                " Original timestamp: {}",
+                format_timestamp(metadata.taken_at_timestamp)
+            ))?;
+
+            let total_size =
+                metadata.wasm_module_size + metadata.wasm_memory_size + metadata.stable_memory_size;
+            ctx.term.write_line(&format!(
+                " Total size: {}",
+                Byte::from_u64(total_size).get_appropriate_unit(UnitType::Binary)
+            ))?;
+
+            // Load or create upload progress
+            let mut progress = if args.resume {
+                match load_upload_progress(paths) {
+                    Ok(progress) => {
+                        ctx.term.write_line(&format!(
+                            "Resuming upload to snapshot {}",
+                            progress.snapshot_id
+                        ))?;
+                        progress
+                    }
+                    Err(SnapshotTransferError::NoUploadProgress { .. }) => {
+                        return Err(SnapshotTransferError::NoUploadProgress {
+                            path: args.input.clone(),
+                        }
+                        .into());
+                    }
+                    Err(e) => return Err(e.into()),
+                }
+            } else {
+                // Upload metadata to create a new snapshot
+                let replace_snapshot = args.replace.as_ref().map(|s| s.0.as_slice());
+                let result =
+                    upload_snapshot_metadata(&agent, cid, &metadata, replace_snapshot).await?;
+
+                let snapshot_id_hex = hex::encode(&result.snapshot_id);
+                ctx.term
+                    .write_line(&format!("Created snapshot {} for upload", snapshot_id_hex))?;
+
+                let mut progress = UploadProgress::new(snapshot_id_hex);
+                progress.metadata_uploaded = true;
+                save_upload_progress(&progress, paths)?;
+                progress
+            };
+
+            let snapshot_id_bytes =
+                hex::decode(&progress.snapshot_id).expect("invalid snapshot ID in progress file");
+
+            // Upload WASM module
+            if metadata.wasm_module_size > 0 {
+                if progress.wasm_module_offset < metadata.wasm_module_size {
+                    let pb = create_transfer_progress_bar(metadata.wasm_module_size, "WASM module");
+                    upload_blob_from_file(
+                        &agent,
+                        cid,
+                        &snapshot_id_bytes,
+                        BlobType::WasmModule,
+                        paths,
+                        &mut progress,
+                        &pb,
+                    )
+                    .await?;
+                    pb.finish_with_message("done");
+                } else {
+                    ctx.term.write_line("WASM module: already complete")?;
+                }
+            }
+
+            // Upload WASM memory
+            if metadata.wasm_memory_size > 0 {
+                if progress.wasm_memory_offset < metadata.wasm_memory_size {
+                    let pb = create_transfer_progress_bar(metadata.wasm_memory_size, "WASM memory");
+                    upload_blob_from_file(
+                        &agent,
+                        cid,
+                        &snapshot_id_bytes,
+                        BlobType::WasmMemory,
+                        paths,
+                        &mut progress,
+                        &pb,
+                    )
+                    .await?;
+                    pb.finish_with_message("done");
+                } else {
+                    ctx.term.write_line("WASM memory: already complete")?;
+                }
+            }
+
+            // Upload stable memory
+            if metadata.stable_memory_size > 0 {
+                if progress.stable_memory_offset < metadata.stable_memory_size {
+                    let pb =
+                        create_transfer_progress_bar(metadata.stable_memory_size, "Stable memory");
+                    upload_blob_from_file(
+                        &agent,
+                        cid,
+                        &snapshot_id_bytes,
+                        BlobType::StableMemory,
+                        paths,
+                        &mut progress,
+                        &pb,
+                    )
+                    .await?;
+                    pb.finish_with_message("done");
+                } else {
+                    ctx.term.write_line("Stable memory: already complete")?;
+                }
+            }
+
+            // Upload WASM chunk store
+            if !metadata.wasm_chunk_store.is_empty() {
+                ctx.term.write_line(&format!(
+                    "Uploading {} WASM chunks...",
+                    metadata.wasm_chunk_store.len()
+                ))?;
+
+                for chunk_hash in &metadata.wasm_chunk_store {
+                    let hash_hex = hex::encode(&chunk_hash.hash);
+                    if !progress.wasm_chunks_uploaded.contains(&hash_hex) {
+                        upload_wasm_chunk(&agent, cid, &snapshot_id_bytes, &chunk_hash.hash, paths)
+                            .await?;
+                        progress.wasm_chunks_uploaded.insert(hash_hex);
+                        save_upload_progress(&progress, paths)?;
+                    }
+                }
+                ctx.term.write_line("WASM chunks: done")?;
+            }
+
+            // Clean up progress file on success
+            delete_upload_progress(paths)?;
+
+            ctx.term.write_line(&format!(
+                "Snapshot {} uploaded successfully",
+                progress.snapshot_id
+            ))?;
+
+            Ok::<_, anyhow::Error>(progress.snapshot_id)
+        })
+        .await??;
+
+    ctx.term.write_line(&format!(
+        "Use `icp canister snapshot restore {name} {snapshot_id}` to restore from this snapshot"
+    ))?;
+
+    Ok(())
+}
diff --git a/crates/icp-cli/src/main.rs b/crates/icp-cli/src/main.rs
index 45aecf94..bb1b7b5f 100644
--- a/crates/icp-cli/src/main.rs
+++ b/crates/icp-cli/src/main.rs
@@ -253,6 +253,12 @@ async fn main() -> Result<(), Error> {
                     .await?
                 }
 
+                commands::canister::snapshot::Command::Download(args) => {
+                    commands::canister::snapshot::download::exec(&ctx, &args)
+                        .instrument(trace_span)
+                        .await?
+                }
+
                 commands::canister::snapshot::Command::List(args) => {
                     commands::canister::snapshot::list::exec(&ctx, &args)
                         .instrument(trace_span)
                         .await?
                 }
@@ -264,6 +270,12 @@
                     .instrument(trace_span)
                     .await?
                 }
+
+                commands::canister::snapshot::Command::Upload(args) => {
+                    commands::canister::snapshot::upload::exec(&ctx, &args)
+                        .instrument(trace_span)
+                        .await?
+                }
             },
 
             commands::canister::Command::Start(args) => {
diff --git a/crates/icp-cli/src/operations/mod.rs b/crates/icp-cli/src/operations/mod.rs
index 4ce01b02..6d6daf9e 100644
--- a/crates/icp-cli/src/operations/mod.rs
+++ b/crates/icp-cli/src/operations/mod.rs
@@ -3,6 +3,7 @@
 pub(crate) mod build;
 pub(crate) mod create;
 pub(crate) mod install;
 pub(crate) mod settings;
+pub(crate) mod snapshot_transfer;
 pub(crate) mod sync;
 pub(crate) mod token;
diff --git a/crates/icp-cli/src/operations/snapshot_transfer.rs b/crates/icp-cli/src/operations/snapshot_transfer.rs
new file mode 100644
index 00000000..015e683a
--- /dev/null
+++ b/crates/icp-cli/src/operations/snapshot_transfer.rs
@@ -0,0 +1,880 @@
+use std::{
+    collections::{BTreeMap, HashSet},
+    io::SeekFrom,
+};
+
+use backoff::{ExponentialBackoff, backoff::Backoff};
+use futures::{StreamExt, stream::FuturesUnordered};
+use ic_agent::{Agent, AgentError, export::Principal};
+use ic_management_canister_types::{
+    ChunkHash, ReadCanisterSnapshotDataArgs, ReadCanisterSnapshotMetadataArgs,
+    ReadCanisterSnapshotMetadataResult, SnapshotDataKind, SnapshotDataOffset,
+    UploadCanisterSnapshotDataArgs, UploadCanisterSnapshotMetadataArgs,
+    UploadCanisterSnapshotMetadataResult,
+};
+use ic_utils::interfaces::ManagementCanister;
+use icp::{
+    fs::lock::{DirectoryStructureLock, LWrite, LockError, PathsAccess},
+    prelude::*,
+};
+use indicatif::{ProgressBar, ProgressStyle};
+use serde::{Deserialize, Serialize};
+use snafu::{ResultExt, Snafu};
+use tokio::{
+    fs::File,
+    io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
+};
+use tracing::debug;
+
+/// Maximum chunk size for snapshot data transfers (2 MB, matching dfx).
+pub const MAX_CHUNK_SIZE: u64 = 2_000_000;
+
+/// Provides access to paths within a snapshot directory.
+pub struct SnapshotPaths {
+    dir: PathBuf,
+}
+
+impl SnapshotPaths {
+    pub fn new(dir: PathBuf) -> Result<SnapshotDirectory, LockError> {
+        DirectoryStructureLock::open_or_create(Self { dir })
+    }
+
+    pub fn dir(&self) -> &Path {
+        &self.dir
+    }
+
+    pub fn metadata_path(&self) -> PathBuf {
+        self.dir.join("metadata.json")
+    }
+
+    pub fn wasm_module_path(&self) -> PathBuf {
+        self.dir.join("wasm_module.bin")
+    }
+
+    pub fn wasm_memory_path(&self) -> PathBuf {
+        self.dir.join("wasm_memory.bin")
+    }
+
+    pub fn stable_memory_path(&self) -> PathBuf {
+        self.dir.join("stable_memory.bin")
+    }
+
+    pub fn wasm_chunk_store_dir(&self) -> PathBuf {
+        self.dir.join("wasm_chunk_store")
+    }
+
+    pub fn wasm_chunk_path(&self, hash: &[u8]) -> PathBuf {
+        self.wasm_chunk_store_dir()
+            .join(format!("{}.bin", hex::encode(hash)))
+    }
+
+    pub fn upload_progress_path(&self) -> PathBuf {
+        self.dir.join(".upload_progress.json")
+    }
+
+    pub fn download_progress_path(&self) -> PathBuf {
+        self.dir.join(".download_progress.json")
+    }
+
+    /// Ensure the directory and wasm chunk store subdirectory exist.
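+    ///
+    /// Assumed idempotent: `create_dir_all` succeeds when the directories
+    /// already exist, so resumed transfers can call this again safely.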
+    pub fn ensure_dirs(&self) -> Result<(), icp::fs::IoError> {
+        icp::fs::create_dir_all(&self.dir)?;
+        icp::fs::create_dir_all(&self.wasm_chunk_store_dir())?;
+        Ok(())
+    }
+
+    pub fn blob_path(&self, blob_type: BlobType) -> PathBuf {
+        match blob_type {
+            BlobType::WasmModule => self.wasm_module_path(),
+            BlobType::WasmMemory => self.wasm_memory_path(),
+            BlobType::StableMemory => self.stable_memory_path(),
+        }
+    }
+}
+
+impl PathsAccess for SnapshotPaths {
+    fn lock_file(&self) -> PathBuf {
+        self.dir.join(".lock")
+    }
+}
+
+pub type SnapshotDirectory = DirectoryStructureLock<SnapshotPaths>;
+
+#[derive(Debug, Snafu)]
+pub enum SnapshotTransferError {
+    #[snafu(display("Failed to read snapshot metadata for canister {canister_id}"))]
+    ReadMetadata {
+        canister_id: Principal,
+        #[snafu(source(from(AgentError, Box::new)))]
+        source: Box<AgentError>,
+    },
+
+    #[snafu(display("Failed to read snapshot data chunk at offset {offset}"))]
+    ReadDataChunk {
+        offset: u64,
+        #[snafu(source(from(AgentError, Box::new)))]
+        source: Box<AgentError>,
+    },
+
+    #[snafu(display("Failed to read WASM chunk with hash {hash}"))]
+    ReadWasmChunk {
+        hash: String,
+        #[snafu(source(from(AgentError, Box::new)))]
+        source: Box<AgentError>,
+    },
+
+    #[snafu(display("Failed to upload snapshot metadata for canister {canister_id}"))]
+    UploadMetadata {
+        canister_id: Principal,
+        #[snafu(source(from(AgentError, Box::new)))]
+        source: Box<AgentError>,
+    },
+
+    #[snafu(display("Failed to upload snapshot data chunk at offset {offset}"))]
+    UploadDataChunk {
+        offset: u64,
+        #[snafu(source(from(AgentError, Box::new)))]
+        source: Box<AgentError>,
+    },
+
+    #[snafu(display("Failed to upload WASM chunk with hash {hash}"))]
+    UploadWasmChunk {
+        hash: String,
+        #[snafu(source(from(AgentError, Box::new)))]
+        source: Box<AgentError>,
+    },
+
+    #[snafu(transparent)]
+    FsIo { source: icp::fs::IoError },
+
+    #[snafu(transparent)]
+    FsRename { source: icp::fs::RenameError },
+
+    #[snafu(transparent)]
+    Json { source: icp::fs::json::Error },
+
+    #[snafu(transparent)]
+    Lock { source: LockError },
+
+    #[snafu(display("Failed to open blob file for resume at {path}"))]
+    OpenBlobForResume {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display("Failed to create blob file at {path}"))]
+    CreateBlobFile {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display("Failed to seek in blob file at {path}"))]
+    SeekBlobFile {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display("Failed to write chunk to blob file at {path}"))]
+    WriteBlobChunk {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display("Failed to flush blob file at {path}"))]
+    FlushBlobFile {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display("Failed to open blob file for upload at {path}"))]
+    OpenBlobForUpload {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display("Failed to read chunk from blob file at {path}"))]
+    ReadBlobChunk {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display("Failed to get file size at {path}"))]
+    GetBlobFileSize {
+        source: std::io::Error,
+        path: PathBuf,
+    },
+
+    #[snafu(display(
+        "Directory {path} is not empty. Use --resume to continue a previous download or choose an empty directory."
+ ))] + DirectoryNotEmpty { path: PathBuf }, + + #[snafu(display("Cannot resume: no existing download found in {path}"))] + NoExistingDownload { path: PathBuf }, + + #[snafu(display("Cannot resume: no upload progress file found in {path}"))] + NoUploadProgress { path: PathBuf }, + + #[snafu(display( + "Upload progress file references snapshot {expected} but resuming with {actual}" + ))] + SnapshotIdMismatch { expected: String, actual: String }, + + #[snafu(display("Missing required file: {path}"))] + MissingFile { path: PathBuf }, + + #[snafu(display("Failed to create download progress file at {path}"))] + CreateDownloadProgress { + source: std::io::Error, + path: PathBuf, + }, + + #[snafu(display("Failed to write download progress file at {path}"))] + WriteDownloadProgress { + source: std::io::Error, + path: PathBuf, + }, + + #[snafu(display("Failed to sync download progress file at {path}"))] + SyncDownloadProgress { + source: std::io::Error, + path: PathBuf, + }, +} + +/// Tracks upload progress for resumable uploads. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UploadProgress { + /// The snapshot ID being uploaded to. + pub snapshot_id: String, + /// Whether metadata has been uploaded. + pub metadata_uploaded: bool, + /// Byte offset for WASM module upload progress. + pub wasm_module_offset: u64, + /// Byte offset for WASM memory upload progress. + pub wasm_memory_offset: u64, + /// Byte offset for stable memory upload progress. + pub stable_memory_offset: u64, + /// Set of WASM chunk hashes that have been uploaded. + pub wasm_chunks_uploaded: HashSet, +} + +impl UploadProgress { + pub fn new(snapshot_id: String) -> Self { + Self { + snapshot_id, + metadata_uploaded: false, + wasm_module_offset: 0, + wasm_memory_offset: 0, + stable_memory_offset: 0, + wasm_chunks_uploaded: HashSet::new(), + } + } +} + +/// Tracks download progress for a single blob. +/// Uses a write frontier plus a set of chunks completed ahead of the frontier. +/// All chunks before the frontier are assumed complete. Only gaps (chunks ahead +/// of the frontier) are tracked, so memory usage is bounded. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct BlobDownloadProgress { + /// The write frontier - all chunks with offset < frontier are complete. + pub frontier: u64, + /// Chunk offsets that completed ahead of the frontier (filled gaps). + pub ahead: HashSet, +} + +impl BlobDownloadProgress { + /// Record a completed chunk and advance the frontier if possible. + pub fn mark_complete(&mut self, offset: u64, total_size: u64) { + if offset < self.frontier { + // Already implicitly complete + return; + } + + if offset > self.frontier { + // Completed ahead of frontier + self.ahead.insert(offset); + return; + } + + // offset == frontier: advance it + self.frontier += chunk_size_at(offset, total_size); + + // Advance further using any chunks that are now at the frontier + while self.ahead.remove(&self.frontier) { + self.frontier += chunk_size_at(self.frontier, total_size); + } + } + + /// Check if a chunk at the given offset needs to be downloaded. + pub fn needs_download(&self, offset: u64) -> bool { + offset >= self.frontier && !self.ahead.contains(&offset) + } + + /// Check if download is complete for a blob of the given total size. + pub fn is_complete(&self, total_size: u64) -> bool { + self.frontier >= total_size + } +} + +/// Calculate chunk size at a given offset for a blob of total_size. 
+fn chunk_size_at(offset: u64, total_size: u64) -> u64 {
+    std::cmp::min(MAX_CHUNK_SIZE, total_size.saturating_sub(offset))
+}
+
+/// Tracks download progress for resumable downloads.
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct DownloadProgress {
+    pub wasm_module: BlobDownloadProgress,
+    pub wasm_memory: BlobDownloadProgress,
+    pub stable_memory: BlobDownloadProgress,
+}
+
+impl DownloadProgress {
+    pub fn blob_progress(&self, blob_type: BlobType) -> &BlobDownloadProgress {
+        match blob_type {
+            BlobType::WasmModule => &self.wasm_module,
+            BlobType::WasmMemory => &self.wasm_memory,
+            BlobType::StableMemory => &self.stable_memory,
+        }
+    }
+
+    pub fn blob_progress_mut(&mut self, blob_type: BlobType) -> &mut BlobDownloadProgress {
+        match blob_type {
+            BlobType::WasmModule => &mut self.wasm_module,
+            BlobType::WasmMemory => &mut self.wasm_memory,
+            BlobType::StableMemory => &mut self.stable_memory,
+        }
+    }
+}
+
+/// Identifies which type of blob is being transferred.
+#[derive(Debug, Clone, Copy)]
+pub enum BlobType {
+    WasmModule,
+    WasmMemory,
+    StableMemory,
+}
+
+impl BlobType {
+    pub fn make_read_kind(&self, offset: u64, size: u64) -> SnapshotDataKind {
+        match self {
+            BlobType::WasmModule => SnapshotDataKind::WasmModule { offset, size },
+            BlobType::WasmMemory => SnapshotDataKind::WasmMemory { offset, size },
+            BlobType::StableMemory => SnapshotDataKind::StableMemory { offset, size },
+        }
+    }
+
+    pub fn make_upload_offset(&self, offset: u64) -> SnapshotDataOffset {
+        match self {
+            BlobType::WasmModule => SnapshotDataOffset::WasmModule { offset },
+            BlobType::WasmMemory => SnapshotDataOffset::WasmMemory { offset },
+            BlobType::StableMemory => SnapshotDataOffset::StableMemory { offset },
+        }
+    }
+}
+
+/// Check if an agent error is retryable.
+fn is_retryable(error: &AgentError) -> bool {
+    matches!(
+        error,
+        AgentError::TimeoutWaitingForResponse() | AgentError::TransportError(_)
+    )
+}
+
+/// Execute an async operation with exponential backoff retry.
+async fn with_retry<T, F, Fut>(operation: F) -> Result<T, AgentError>
+where
+    F: Fn() -> Fut,
+    Fut: std::future::Future<Output = Result<T, AgentError>>,
+{
+    let mut backoff = ExponentialBackoff {
+        max_elapsed_time: Some(std::time::Duration::from_secs(60)),
+        ..ExponentialBackoff::default()
+    };
+
+    loop {
+        match operation().await {
+            Ok(result) => return Ok(result),
+            Err(err) if is_retryable(&err) => {
+                if let Some(duration) = backoff.next_backoff() {
+                    debug!("Retryable error, waiting {:?}: {}", duration, err);
+                    tokio::time::sleep(duration).await;
+                } else {
+                    return Err(err);
+                }
+            }
+            Err(err) => return Err(err),
+        }
+    }
+}
+
+/// Create a progress bar for byte transfers.
+pub fn create_transfer_progress_bar(total_bytes: u64, label: &str) -> ProgressBar {
+    let pb = ProgressBar::new(total_bytes);
+    pb.set_style(
+        ProgressStyle::default_bar()
+            .template("{prefix} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")
+            .expect("invalid progress bar template")
+            .progress_chars("#>-"),
+    );
+    pb.set_prefix(label.to_string());
+    pb
+}
+
+/// Read snapshot metadata from a canister.
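+///
+/// Transient transport and timeout errors are retried via `with_retry`; any
+/// other agent error is surfaced immediately as a `ReadMetadata` error.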
+pub async fn read_snapshot_metadata(
+    agent: &Agent,
+    canister_id: Principal,
+    snapshot_id: &[u8],
+) -> Result<ReadCanisterSnapshotMetadataResult, SnapshotTransferError> {
+    let mgmt = ManagementCanister::create(agent);
+
+    let args = ReadCanisterSnapshotMetadataArgs {
+        canister_id,
+        snapshot_id: snapshot_id.to_vec(),
+    };
+
+    let (metadata,) = with_retry(|| async {
+        mgmt.read_canister_snapshot_metadata(&canister_id, &args)
+            .await
+    })
+    .await
+    .context(ReadMetadataSnafu { canister_id })?;
+
+    Ok(metadata)
+}
+
+/// Upload snapshot metadata to create a new snapshot.
+pub async fn upload_snapshot_metadata(
+    agent: &Agent,
+    canister_id: Principal,
+    metadata: &ReadCanisterSnapshotMetadataResult,
+    replace_snapshot: Option<&[u8]>,
+) -> Result<UploadCanisterSnapshotMetadataResult, SnapshotTransferError> {
+    let mgmt = ManagementCanister::create(agent);
+
+    let args = UploadCanisterSnapshotMetadataArgs {
+        canister_id,
+        replace_snapshot: replace_snapshot.map(|s| s.to_vec()),
+        wasm_module_size: metadata.wasm_module_size,
+        globals: metadata.globals.clone(),
+        wasm_memory_size: metadata.wasm_memory_size,
+        stable_memory_size: metadata.stable_memory_size,
+        certified_data: metadata.certified_data.clone(),
+        global_timer: metadata.global_timer.clone(),
+        on_low_wasm_memory_hook_status: metadata.on_low_wasm_memory_hook_status.clone(),
+    };
+
+    let (result,) = with_retry(|| async {
+        mgmt.upload_canister_snapshot_metadata(&canister_id, &args)
+            .await
+    })
+    .await
+    .context(UploadMetadataSnafu { canister_id })?;
+
+    Ok(result)
+}
+
+/// Download a blob (wasm_module, wasm_memory, or stable_memory) to a file.
+///
+/// Writes chunks directly at their offset (no in-memory buffering).
+/// Tracks progress so gaps can be filled on resume.
+/// The agent handles rate limiting and semaphoring internally.
+pub async fn download_blob_to_file(
+    agent: &Agent,
+    canister_id: Principal,
+    snapshot_id: &[u8],
+    blob_type: BlobType,
+    total_size: u64,
+    paths: LWrite<&SnapshotPaths>,
+    progress: &mut DownloadProgress,
+    progress_bar: &ProgressBar,
+) -> Result<(), SnapshotTransferError> {
+    let output_path = paths.blob_path(blob_type);
+
+    if total_size == 0 {
+        icp::fs::write(&output_path, &[])?;
+        return Ok(());
+    }
+
+    let blob_progress = progress.blob_progress(blob_type);
+    if blob_progress.is_complete(total_size) {
+        return Ok(());
+    }
+
+    let mgmt = ManagementCanister::create(agent);
+
+    // Create or open file for random-access writing
+    let file = if output_path.exists() {
+        File::options()
+            .write(true)
+            .open(&output_path)
+            .await
+            .context(OpenBlobForResumeSnafu { path: &output_path })?
+    } else {
+        // Pre-allocate file to total size
+        let f = File::create(&output_path)
+            .await
+            .context(CreateBlobFileSnafu { path: &output_path })?;
+        f.set_len(total_size)
+            .await
+            .context(CreateBlobFileSnafu { path: &output_path })?;
+        f
+    };
+
+    // Set initial progress based on frontier
+    let initial_bytes = progress.blob_progress(blob_type).frontier;
+    progress_bar.set_position(initial_bytes);
+
+    // Determine which chunks need downloading
+    let snapshot_id_vec = snapshot_id.to_vec();
+    let mut in_progress: FuturesUnordered<_> = FuturesUnordered::new();
+
+    let mut offset = 0u64;
+    while offset < total_size {
+        let chunk_size = chunk_size_at(offset, total_size);
+
+        if progress.blob_progress(blob_type).needs_download(offset) {
+            let chunk_offset = offset;
+            let args = ReadCanisterSnapshotDataArgs {
+                canister_id,
+                snapshot_id: snapshot_id_vec.clone(),
+                kind: blob_type.make_read_kind(chunk_offset, chunk_size),
+            };
+
+            let mgmt = mgmt.clone();
+            in_progress.push(async move {
+                let result = with_retry(|| async {
+                    mgmt.read_canister_snapshot_data(&canister_id, &args).await
+                })
+                .await
+                .context(ReadDataChunkSnafu {
+                    offset: chunk_offset,
+                })?;
+                Ok::<_, SnapshotTransferError>((chunk_offset, result.0.chunk))
+            });
+        }
+
+        offset += chunk_size;
+    }
+
+    // Process completed chunks - write directly at offset
+    use std::sync::Arc;
+    use tokio::sync::Mutex;
+    let file = Arc::new(Mutex::new(file));
+
+    while let Some(result) = in_progress.next().await {
+        let (chunk_offset, chunk) = result?;
+
+        // Write chunk at its offset
+        {
+            let mut f = file.lock().await;
+            f.seek(SeekFrom::Start(chunk_offset))
+                .await
+                .context(SeekBlobFileSnafu { path: &output_path })?;
+            f.write_all(&chunk)
+                .await
+                .context(WriteBlobChunkSnafu { path: &output_path })?;
+            f.sync_data()
+                .await
+                .context(FlushBlobFileSnafu { path: &output_path })?;
+        }
+
+        // Update progress
+        progress
+            .blob_progress_mut(blob_type)
+            .mark_complete(chunk_offset, total_size);
+        save_download_progress(progress, paths)?;
+
+        // Update progress bar to show frontier position
+        progress_bar.set_position(progress.blob_progress(blob_type).frontier);
+    }
+
+    Ok(())
+}
+
+/// Download a single WASM chunk by hash.
+pub async fn download_wasm_chunk(
+    agent: &Agent,
+    canister_id: Principal,
+    snapshot_id: &[u8],
+    chunk_hash: &ChunkHash,
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<(), SnapshotTransferError> {
+    let mgmt = ManagementCanister::create(agent);
+
+    let args = ReadCanisterSnapshotDataArgs {
+        canister_id,
+        snapshot_id: snapshot_id.to_vec(),
+        kind: SnapshotDataKind::WasmChunk {
+            hash: chunk_hash.hash.clone(),
+        },
+    };
+
+    let hash_hex = hex::encode(&chunk_hash.hash);
+    let output_path = paths.wasm_chunk_path(&chunk_hash.hash);
+
+    let (result,) =
+        with_retry(|| async { mgmt.read_canister_snapshot_data(&canister_id, &args).await })
+            .await
+            .context(ReadWasmChunkSnafu { hash: &hash_hex })?;
+
+    icp::fs::write(&output_path, &result.chunk)?;
+
+    Ok(())
+}
+
+/// Upload a blob (wasm_module, wasm_memory, or stable_memory) from a file.
+///
+/// Uses parallel chunk uploading. The agent handles rate limiting internally.
+/// Saves progress after each successful chunk for resume support.
+/// Returns the final byte offset after all uploads complete.
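+///
+/// Note that chunks may complete out of order, so the saved offset only
+/// advances over a contiguous prefix of completed chunks; on resume, chunks
+/// that finished ahead of that prefix are re-uploaded, which rewrites the
+/// same bytes and is assumed harmless.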
+pub async fn upload_blob_from_file(
+    agent: &Agent,
+    canister_id: Principal,
+    snapshot_id: &[u8],
+    blob_type: BlobType,
+    paths: LWrite<&SnapshotPaths>,
+    progress: &mut UploadProgress,
+    progress_bar: &ProgressBar,
+) -> Result<u64, SnapshotTransferError> {
+    let input_path = paths.blob_path(blob_type);
+    let file_size = std::fs::metadata(&input_path)
+        .context(GetBlobFileSizeSnafu { path: &input_path })?
+        .len();
+
+    if file_size == 0 {
+        return Ok(0);
+    }
+
+    let start_offset = match blob_type {
+        BlobType::WasmModule => progress.wasm_module_offset,
+        BlobType::WasmMemory => progress.wasm_memory_offset,
+        BlobType::StableMemory => progress.stable_memory_offset,
+    };
+
+    let mgmt = ManagementCanister::create(agent);
+
+    let mut file = File::open(&input_path)
+        .await
+        .context(OpenBlobForUploadSnafu { path: &input_path })?;
+
+    if start_offset > 0 {
+        file.seek(SeekFrom::Start(start_offset))
+            .await
+            .context(SeekBlobFileSnafu { path: &input_path })?;
+    }
+
+    progress_bar.set_position(start_offset);
+
+    // Read all chunks and launch uploads concurrently
+    let snapshot_id_vec = snapshot_id.to_vec();
+    let mut in_progress: FuturesUnordered<_> = FuturesUnordered::new();
+
+    let mut current_offset = start_offset;
+    while current_offset < file_size {
+        let chunk_size = std::cmp::min(MAX_CHUNK_SIZE, file_size - current_offset) as usize;
+        let mut chunk = vec![0u8; chunk_size];
+        file.read_exact(&mut chunk)
+            .await
+            .context(ReadBlobChunkSnafu { path: &input_path })?;
+
+        let offset = current_offset;
+        current_offset += chunk_size as u64;
+
+        let args = UploadCanisterSnapshotDataArgs {
+            canister_id,
+            snapshot_id: snapshot_id_vec.clone(),
+            kind: blob_type.make_upload_offset(offset),
+            chunk,
+        };
+
+        let mgmt = mgmt.clone();
+        in_progress.push(async move {
+            with_retry(|| async {
+                mgmt.upload_canister_snapshot_data(&canister_id, &args)
+                    .await
+            })
+            .await
+            .context(UploadDataChunkSnafu { offset })?;
+            Ok::<_, SnapshotTransferError>((offset, args.chunk.len() as u64))
+        });
+    }
+
+    // Track completed uploads for ordered progress reporting
+    let mut completed: BTreeMap<u64, u64> = BTreeMap::new();
+    let mut next_report_offset = start_offset;
+    let mut first_error: Option<SnapshotTransferError> = None;
+
+    while let Some(result) = in_progress.next().await {
+        match result {
+            Ok((offset, size)) => {
+                completed.insert(offset, size);
+
+                // Update progress in order and save after each advancement
+                while let Some(&size) = completed.get(&next_report_offset) {
+                    completed.remove(&next_report_offset);
+                    next_report_offset += size;
+                    progress_bar.set_position(next_report_offset);
+
+                    // Update and save progress
+                    match blob_type {
+                        BlobType::WasmModule => progress.wasm_module_offset = next_report_offset,
+                        BlobType::WasmMemory => progress.wasm_memory_offset = next_report_offset,
+                        BlobType::StableMemory => {
+                            progress.stable_memory_offset = next_report_offset
+                        }
+                    }
+                    save_upload_progress(progress, paths)?;
+                }
+            }
+            Err(e) => {
+                // Record first error but continue processing to save any completed chunks
+                if first_error.is_none() {
+                    first_error = Some(e);
+                }
+            }
+        }
+    }
+
+    // Return error if any chunk failed
+    if let Some(e) = first_error {
+        return Err(e);
+    }
+
+    Ok(next_report_offset)
+}
+
+/// Upload a single WASM chunk.
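+///
+/// The chunk bytes are read back from `paths.wasm_chunk_path(chunk_hash)`,
+/// so this assumes the chunk was previously downloaded into the snapshot
+/// directory's `wasm_chunk_store`.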
+pub async fn upload_wasm_chunk(
+    agent: &Agent,
+    canister_id: Principal,
+    snapshot_id: &[u8],
+    chunk_hash: &[u8],
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<(), SnapshotTransferError> {
+    let mgmt = ManagementCanister::create(agent);
+
+    let chunk_path = paths.wasm_chunk_path(chunk_hash);
+    let chunk = icp::fs::read(&chunk_path)?;
+
+    let args = UploadCanisterSnapshotDataArgs {
+        canister_id,
+        snapshot_id: snapshot_id.to_vec(),
+        kind: SnapshotDataOffset::WasmChunk,
+        chunk,
+    };
+
+    let hash_hex = hex::encode(chunk_hash);
+
+    with_retry(|| async {
+        mgmt.upload_canister_snapshot_data(&canister_id, &args)
+            .await
+    })
+    .await
+    .context(UploadWasmChunkSnafu { hash: hash_hex })?;
+
+    Ok(())
+}
+
+/// Save upload progress to a file.
+pub fn save_upload_progress(
+    progress: &UploadProgress,
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<(), SnapshotTransferError> {
+    icp::fs::json::save(&paths.upload_progress_path(), progress)?;
+    Ok(())
+}
+
+/// Load upload progress from a file.
+pub fn load_upload_progress(
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<UploadProgress, SnapshotTransferError> {
+    let progress_path = paths.upload_progress_path();
+    if !progress_path.exists() {
+        return Err(SnapshotTransferError::NoUploadProgress {
+            path: paths.dir().to_path_buf(),
+        });
+    }
+    Ok(icp::fs::json::load(&progress_path)?)
+}
+
+/// Delete upload progress file.
+pub fn delete_upload_progress(paths: LWrite<&SnapshotPaths>) -> Result<(), SnapshotTransferError> {
+    let progress_path = paths.upload_progress_path();
+    if progress_path.exists() {
+        icp::fs::remove_file(&progress_path)?;
+    }
+    Ok(())
+}
+
+/// Save download progress to a file atomically.
+pub fn save_download_progress(
+    progress: &DownloadProgress,
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<(), SnapshotTransferError> {
+    use std::io::Write;
+
+    let target_path = paths.download_progress_path();
+    let tmp_path = paths.dir().join(".download_progress.json.tmp");
+
+    // Write to temp file
+    let contents =
+        serde_json::to_string_pretty(progress).expect("DownloadProgress is always serializable");
+    let mut file = std::fs::File::create(&tmp_path)
+        .context(CreateDownloadProgressSnafu { path: &tmp_path })?;
+    file.write_all(contents.as_bytes())
+        .context(WriteDownloadProgressSnafu { path: &tmp_path })?;
+    file.sync_all()
+        .context(SyncDownloadProgressSnafu { path: &tmp_path })?;
+    drop(file);
+
+    // Atomic rename
+    icp::fs::rename(&tmp_path, &target_path)?;
+
+    Ok(())
+}
+
+/// Load download progress from a file, or return default if none exists.
+pub fn load_download_progress(
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<DownloadProgress, SnapshotTransferError> {
+    Ok(icp::fs::json::load_or_default(
+        &paths.download_progress_path(),
+    )?)
+}
+
+/// Delete download progress file.
+pub fn delete_download_progress(
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<(), SnapshotTransferError> {
+    let progress_path = paths.download_progress_path();
+    if progress_path.exists() {
+        icp::fs::remove_file(&progress_path)?;
+    }
+    Ok(())
+}
+
+/// Save metadata to the snapshot directory.
+pub fn save_metadata(
+    metadata: &ReadCanisterSnapshotMetadataResult,
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<(), SnapshotTransferError> {
+    icp::fs::json::save(&paths.metadata_path(), metadata)?;
+    Ok(())
+}
+
+/// Load metadata from the snapshot directory.
+pub fn load_metadata(
+    paths: LWrite<&SnapshotPaths>,
+) -> Result<ReadCanisterSnapshotMetadataResult, SnapshotTransferError> {
+    let metadata_path = paths.metadata_path();
+    if !metadata_path.exists() {
+        return Err(SnapshotTransferError::MissingFile {
+            path: metadata_path,
+        });
+    }
+    Ok(icp::fs::json::load(&metadata_path)?)
+}
diff --git a/crates/icp-cli/tests/assets/limit_transfer.py b/crates/icp-cli/tests/assets/limit_transfer.py
new file mode 100644
index 00000000..611a1322
--- /dev/null
+++ b/crates/icp-cli/tests/assets/limit_transfer.py
@@ -0,0 +1,62 @@
+"""
+mitmproxy addon that allows a limited number of request/response pairs through.
+Usage: mitmdump --mode reverse:http://localhost:PORT -p PROXY_PORT -s limit_transfer.py
+Set LIMIT_REQUESTS environment variable to control how many requests to allow (default: 2).
+
+Requests are serialized - only one in flight at a time. After the limit is reached,
+subsequent requests are killed.
+Default of 2 allows metadata + one data chunk through.
+"""
+
+from mitmproxy import ctx, http
+import os
+import asyncio
+
+class LimitTransfer:
+    def __init__(self):
+        self.requests_allowed = int(os.environ.get("LIMIT_REQUESTS", 2))
+        self.requests_completed = 0
+        self.in_flight = False
+        self.waiters = []
+        ctx.log.info(f"LimitTransfer: allowing {self.requests_allowed} requests")
+
+    async def request(self, flow: http.HTTPFlow):
+        # If limit reached, kill immediately
+        if self.requests_completed >= self.requests_allowed:
+            ctx.log.info("LimitTransfer: killing request (limit reached)")
+            flow.kill()
+            return
+
+        # If another request is in flight, wait
+        if self.in_flight:
+            # Check if we'd exceed limit when this eventually runs
+            if self.requests_completed + len(self.waiters) + 1 >= self.requests_allowed:
+                ctx.log.info("LimitTransfer: killing request (would exceed limit)")
+                flow.kill()
+                return
+
+            event = asyncio.Event()
+            self.waiters.append(event)
+            ctx.log.info("LimitTransfer: stalling request")
+            await event.wait()
+
+            # Check again after waking
+            if self.requests_completed >= self.requests_allowed:
+                ctx.log.info("LimitTransfer: killing request after wait (limit reached)")
+                flow.kill()
+                return
+
+        self.in_flight = True
+        ctx.log.info(f"LimitTransfer: allowing request ({self.requests_completed + 1}/{self.requests_allowed})")
+
+    def response(self, flow: http.HTTPFlow):
+        self.requests_completed += 1
+        self.in_flight = False
+        ctx.log.info(f"LimitTransfer: {self.requests_completed}/{self.requests_allowed} requests completed")
+
+        # Wake next waiter if under limit
+        if self.waiters and self.requests_completed < self.requests_allowed:
+            waiter = self.waiters.pop(0)
+            waiter.set()
+
+addons = [LimitTransfer()]
diff --git a/crates/icp-cli/tests/canister_snapshot_tests.rs b/crates/icp-cli/tests/canister_snapshot_tests.rs
index c599c60e..18f158e3 100644
--- a/crates/icp-cli/tests/canister_snapshot_tests.rs
+++ b/crates/icp-cli/tests/canister_snapshot_tests.rs
@@ -404,6 +404,274 @@ async fn canister_snapshot_replace() {
     );
 }
 
+/// Tests downloading a snapshot to disk and uploading it back
+#[cfg(unix)] // moc
+#[tokio::test]
+async fn canister_snapshot_download_upload_roundtrip() {
+    let ctx = TestContext::new();
+    let project_dir = ctx.create_project_dir("icp");
+    let snapshot_dir = ctx.create_project_dir("snapshot");
+
+    ctx.copy_asset_dir("echo_init_arg_canister", &project_dir);
+
+    let pm = formatdoc! {r#"
+        canisters:
+          - name: my-canister
+            recipe:
+              type: "@dfinity/motoko"
+              configuration:
+                main: main.mo
+                args: ""
+            init_args: "(opt 42 : opt nat8)"
+
+        {NETWORK_RANDOM_PORT}
+        {ENVIRONMENT_RANDOM_PORT}
+    "#};
+
+    write_string(&project_dir.join("icp.yaml"), &pm).expect("failed to write project manifest");
+
+    let _g = ctx.start_network_in(&project_dir, "random-network").await;
+    ctx.ping_until_healthy(&project_dir, "random-network");
+
+    clients::icp(&ctx, &project_dir, Some("random-environment".to_string()))
+        .mint_cycles(10 * TRILLION);
+
+    // Deploy canister with initial value 42
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "deploy",
+            "my-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    // Verify initial value
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "call",
+            "--environment",
+            "random-environment",
+            "my-canister",
+            "get",
+            "()",
+        ])
+        .assert()
+        .success()
+        .stdout(contains("\"42\""));
+
+    // Stop the canister before creating snapshot
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "stop",
+            "my-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    // Create a snapshot
+    let create_output = ctx
+        .icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "create",
+            "my-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success()
+        .get_output()
+        .stdout
+        .clone();
+
+    let original_snapshot_id = String::from_utf8_lossy(&create_output)
+        .lines()
+        .find(|line| line.contains("Created snapshot"))
+        .and_then(|line| line.split_whitespace().nth(2))
+        .expect("Could not extract snapshot ID")
+        .to_string();
+
+    // Download the snapshot
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "download",
+            "my-canister",
+            &original_snapshot_id,
+            "--output",
+            snapshot_dir.as_str(),
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success()
+        .stdout(contains("Snapshot downloaded"));
+
+    // Verify metadata file was created
+    assert!(
+        snapshot_dir.join("metadata.json").exists(),
+        "metadata.json should exist"
+    );
+
+    // Delete the original snapshot
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "delete",
+            "my-canister",
+            &original_snapshot_id,
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    // Upload the snapshot to create a new one
+    let upload_output = ctx
+        .icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "upload",
+            "my-canister",
+            "--input",
+            snapshot_dir.as_str(),
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success()
+        .get_output()
+        .stdout
+        .clone();
+
+    let uploaded_snapshot_id = String::from_utf8_lossy(&upload_output)
+        .lines()
+        .find(|line| line.contains("uploaded successfully"))
+        .and_then(|line| line.split_whitespace().nth(1)) // "Snapshot <id> uploaded successfully"
+        .expect("Could not extract uploaded snapshot ID")
+        .to_string();
+
+    // The uploaded snapshot should have a different ID
+    assert_ne!(original_snapshot_id, uploaded_snapshot_id);
+
+    // Reinstall canister with different value
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "start",
+            "my-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "install",
+            "my-canister",
+            "--environment",
+            "random-environment",
+            "--mode",
+            "reinstall",
+            "--args",
+            "(opt 99 : opt nat8)",
+        ])
+        .assert()
+        .success();
+
+    // Verify value changed
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "call",
+            "--environment",
+            "random-environment",
+            "my-canister",
+            "get",
+            "()",
+        ])
+        .assert()
+        .success()
+        .stdout(contains("\"99\""));
+
+    // Stop and restore from uploaded snapshot
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "stop",
+            "my-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "restore",
+            "my-canister",
+            &uploaded_snapshot_id,
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    // Start and verify value is back to 42
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "start",
+            "my-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "call",
+            "--environment",
+            "random-environment",
+            "my-canister",
+            "get",
+            "()",
+        ])
+        .assert()
+        .success()
+        .stdout(contains("\"42\""));
+}
+
 /// Tests that running canisters cannot be snapshotted or restored
 #[cfg(unix)] // moc
 #[tokio::test]
@@ -528,3 +796,446 @@ async fn canister_snapshot_requires_stopped() {
         .stderr(contains("currently running"))
         .stderr(contains("icp canister stop"));
 }
+
+/// Helper to generate large.wasm if it doesn't exist
+fn ensure_large_wasm(ctx: &TestContext) -> PathBuf {
+    let script_path = ctx.pkg_dir().join("tests/assets/generate_large_wasm.sh");
+    let wasm_path = ctx.pkg_dir().join("tests/assets/large.wasm");
+
+    if !wasm_path.exists() {
+        std::process::Command::new("bash")
+            .arg(&script_path)
+            .current_dir(ctx.pkg_dir().join("tests/assets"))
+            .status()
+            .expect("failed to run generate_large_wasm.sh");
+    }
+
+    assert!(
+        wasm_path.exists(),
+        "large.wasm should exist after generation"
+    );
+    wasm_path
+}
+
+/// Helper to start mitmproxy as a reverse proxy
+struct MitmproxyGuard {
+    child: std::process::Child,
+    port: u16,
+}
+
+impl MitmproxyGuard {
+    /// Start mitmproxy allowing `limit_requests` request/response pairs through.
+    /// Default of 2 allows metadata + one data chunk.
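+    ///
+    /// The spawned `mitmdump` process is killed when the guard is dropped,
+    /// so fault injection can be scoped to a single transfer attempt.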
+    fn start(target_port: u16, limit_requests: u32) -> Self {
+        // Find a free port for mitmproxy
+        let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
+        let proxy_port = listener.local_addr().unwrap().port();
+        drop(listener);
+
+        let script_path = std::env::var("CARGO_MANIFEST_DIR")
+            .map(PathBuf::from)
+            .unwrap_or_else(|_| PathBuf::from("."))
+            .join("tests/assets/limit_transfer.py");
+
+        let child = std::process::Command::new("mitmdump")
+            .args([
+                "--mode",
+                &format!("reverse:http://localhost:{target_port}"),
+                "-p",
+                &proxy_port.to_string(),
+                "-s",
+                script_path.as_str(),
+                "--set",
+                "flow_detail=0",
+                "-q",
+            ])
+            .env("LIMIT_REQUESTS", limit_requests.to_string())
+            .stdout(std::process::Stdio::null())
+            .stderr(std::process::Stdio::null())
+            .spawn()
+            .expect("failed to start mitmproxy - is it installed?");
+
+        // Give mitmproxy time to start
+        std::thread::sleep(std::time::Duration::from_millis(500));
+
+        Self {
+            child,
+            port: proxy_port,
+        }
+    }
+}
+
+impl Drop for MitmproxyGuard {
+    fn drop(&mut self) {
+        let _ = self.child.kill();
+        let _ = self.child.wait();
+    }
+}
+
+/// Tests that download can resume after interruption
+#[cfg(unix)]
+#[tokio::test]
+async fn canister_snapshot_download_resume() {
+    let ctx = TestContext::new();
+    let project_dir = ctx.create_project_dir("icp");
+    let snapshot_dir = ctx.create_project_dir("snapshot");
+
+    // Get the large wasm
+    let large_wasm = ensure_large_wasm(&ctx);
+
+    // Project manifest using prebuilt large.wasm
+    let pm = formatdoc! {r#"
+        canisters:
+          - name: large-canister
+            recipe:
+              type: "@dfinity/prebuilt"
+              configuration:
+                path: "{wasm_path}"
+
+        {NETWORK_RANDOM_PORT}
+        {ENVIRONMENT_RANDOM_PORT}
+    "#, wasm_path = large_wasm.as_str()};
+
+    write_string(&project_dir.join("icp.yaml"), &pm).expect("failed to write project manifest");
+
+    let _g = ctx.start_network_in(&project_dir, "random-network").await;
+    ctx.ping_until_healthy(&project_dir, "random-network");
+
+    // Get the real network port from the descriptor
+    let descriptor_bytes = ctx.read_network_descriptor(&project_dir, "random-network");
+    let descriptor: serde_json::Value =
+        serde_json::from_slice(&descriptor_bytes).expect("invalid descriptor JSON");
+    let real_port = descriptor["gateway"]["port"].as_u64().unwrap() as u16;
+
+    clients::icp(&ctx, &project_dir, Some("random-environment".to_string()))
+        .mint_cycles(100 * TRILLION);
+
+    // Deploy the large canister
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "deploy",
+            "large-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    // Stop and create snapshot
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "stop",
+            "large-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success();
+
+    let create_output = ctx
+        .icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "create",
+            "large-canister",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success()
+        .get_output()
+        .stdout
+        .clone();
+
+    let snapshot_id = String::from_utf8_lossy(&create_output)
+        .lines()
+        .find(|line| line.contains("Created snapshot"))
+        .and_then(|line| line.split_whitespace().nth(2))
+        .expect("Could not extract snapshot ID")
+        .to_string();
+
+    // Start mitmproxy allowing 2 requests: metadata + one data chunk
+    let proxy = MitmproxyGuard::start(real_port, 2);
+
+    // Modify the network descriptor to route through mitmproxy
+    let mut modified_descriptor = descriptor.clone();
+    modified_descriptor["gateway"]["port"] = serde_json::json!(proxy.port);
+    ctx.write_network_descriptor(
+        &project_dir,
+        "random-network",
+        serde_json::to_vec_pretty(&modified_descriptor)
+            .unwrap()
+            .as_slice(),
+    );
+
+    // First download attempt should fail (proxy cuts off after 1 chunk)
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "download",
+            "large-canister",
+            &snapshot_id,
+            "--output",
+            snapshot_dir.as_str(),
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .failure();
+
+    // Verify partial download state exists
+    assert!(
+        snapshot_dir.join("metadata.json").exists(),
+        "metadata.json should exist after partial download"
+    );
+    assert!(
+        snapshot_dir.join(".download_progress.json").exists(),
+        "download progress file should exist"
+    );
+
+    // Verify progress file shows intermediate state (some progress but not complete)
+    let progress_content =
+        std::fs::read_to_string(snapshot_dir.join(".download_progress.json")).unwrap();
+    let progress: serde_json::Value = serde_json::from_str(&progress_content).unwrap();
+    let frontier = progress["wasm_module"]["frontier"].as_u64().unwrap();
+    let ahead_count = progress["wasm_module"]["ahead"]
+        .as_array()
+        .map(|a| a.len())
+        .unwrap_or(0);
+    // Wasm module is ~3MB, chunk size is 2MB. Intermediate state means exactly one chunk done.
+    // Either frontier=2MB (first chunk done in order) or frontier=0 with one ahead chunk.
+    let chunks_done = (frontier / 2_000_000) as usize + ahead_count;
+    assert_eq!(
+        chunks_done, 1,
+        "exactly one chunk should have completed (frontier={frontier}, ahead={ahead_count})"
+    );
+
+    // Restore the real network descriptor for the resume
+    ctx.write_network_descriptor(&project_dir, "random-network", &descriptor_bytes);
+
+    // Resume download should succeed
+    ctx.icp()
+        .current_dir(&project_dir)
+        .args([
+            "canister",
+            "snapshot",
+            "download",
+            "large-canister",
+            &snapshot_id,
+            "--output",
+            snapshot_dir.as_str(),
+            "--resume",
+            "--environment",
+            "random-environment",
+        ])
+        .assert()
+        .success()
+        .stdout(contains("Snapshot downloaded"));
+
+    // Progress file should be cleaned up
+    assert!(
+        !snapshot_dir.join(".download_progress.json").exists(),
+        "download progress file should be cleaned up after success"
+    );
+}
+
+/// Tests that upload can resume after interruption
+#[cfg(unix)]
+#[tokio::test]
+async fn canister_snapshot_upload_resume() {
+    let ctx = TestContext::new();
+    let project_dir = ctx.create_project_dir("icp");
+    let snapshot_dir = ctx.create_project_dir("snapshot");
+
+    // Get the large wasm
+    let large_wasm = ensure_large_wasm(&ctx);
+
+    // Project manifest using prebuilt large.wasm
+    let pm = formatdoc! {r#"
{r#" + canisters: + - name: large-canister + recipe: + type: "@dfinity/prebuilt" + configuration: + path: "{wasm_path}" + + {NETWORK_RANDOM_PORT} + {ENVIRONMENT_RANDOM_PORT} + "#, wasm_path = large_wasm.as_str()}; + + write_string(&project_dir.join("icp.yaml"), &pm).expect("failed to write project manifest"); + + let _g = ctx.start_network_in(&project_dir, "random-network").await; + ctx.ping_until_healthy(&project_dir, "random-network"); + + // Get the real network port from the descriptor + let descriptor_bytes = ctx.read_network_descriptor(&project_dir, "random-network"); + let descriptor: serde_json::Value = + serde_json::from_slice(&descriptor_bytes).expect("invalid descriptor JSON"); + let real_port = descriptor["gateway"]["port"].as_u64().unwrap() as u16; + + clients::icp(&ctx, &project_dir, Some("random-environment".to_string())) + .mint_cycles(100 * TRILLION); + + // Deploy the large canister + ctx.icp() + .current_dir(&project_dir) + .args([ + "deploy", + "large-canister", + "--environment", + "random-environment", + ]) + .assert() + .success(); + + // Stop and create snapshot + ctx.icp() + .current_dir(&project_dir) + .args([ + "canister", + "stop", + "large-canister", + "--environment", + "random-environment", + ]) + .assert() + .success(); + + let create_output = ctx + .icp() + .current_dir(&project_dir) + .args([ + "canister", + "snapshot", + "create", + "large-canister", + "--environment", + "random-environment", + ]) + .assert() + .success() + .get_output() + .stdout + .clone(); + + let snapshot_id = String::from_utf8_lossy(&create_output) + .lines() + .find(|line| line.contains("Created snapshot")) + .and_then(|line| line.split_whitespace().nth(2)) + .expect("Could not extract snapshot ID") + .to_string(); + + // Download the snapshot completely (without proxy interference) + ctx.icp() + .current_dir(&project_dir) + .args([ + "canister", + "snapshot", + "download", + "large-canister", + &snapshot_id, + "--output", + snapshot_dir.as_str(), + "--environment", + "random-environment", + ]) + .assert() + .success(); + + // Delete the snapshot so we can upload a new one + ctx.icp() + .current_dir(&project_dir) + .args([ + "canister", + "snapshot", + "delete", + "large-canister", + &snapshot_id, + "--environment", + "random-environment", + ]) + .assert() + .success(); + + // Start mitmproxy allowing 3 requests: status check + metadata upload + one data chunk + let proxy = MitmproxyGuard::start(real_port, 3); + + // Modify the network descriptor to route through mitmproxy + let mut modified_descriptor = descriptor.clone(); + modified_descriptor["gateway"]["port"] = serde_json::json!(proxy.port); + ctx.write_network_descriptor( + &project_dir, + "random-network", + serde_json::to_vec_pretty(&modified_descriptor) + .unwrap() + .as_slice(), + ); + + // First upload attempt should fail + ctx.icp() + .current_dir(&project_dir) + .args([ + "canister", + "snapshot", + "upload", + "large-canister", + "--input", + snapshot_dir.as_str(), + "--environment", + "random-environment", + ]) + .assert() + .failure(); + + // Verify upload progress file exists + assert!( + snapshot_dir.join(".upload_progress.json").exists(), + "upload progress file should exist after partial upload" + ); + + // Verify progress file shows intermediate state (some progress but not complete) + let progress_content = + std::fs::read_to_string(snapshot_dir.join(".upload_progress.json")).unwrap(); + let progress: serde_json::Value = serde_json::from_str(&progress_content).unwrap(); + let offset = 
progress["wasm_module_offset"].as_u64().unwrap(); + // Wasm module is ~3MB. Intermediate state means 0 < offset < 3MB. + assert!( + offset > 0 && offset < 3_000_000, + "exactly one chunk should have been uploaded (offset={offset})" + ); + + // Restore the real network descriptor for the resume + ctx.write_network_descriptor(&project_dir, "random-network", &descriptor_bytes); + + // Resume upload should succeed + ctx.icp() + .current_dir(&project_dir) + .args([ + "canister", + "snapshot", + "upload", + "large-canister", + "--input", + snapshot_dir.as_str(), + "--resume", + "--environment", + "random-environment", + ]) + .assert() + .success() + .stdout(contains("uploaded successfully")); + + // Progress file should be cleaned up + assert!( + !snapshot_dir.join(".upload_progress.json").exists(), + "upload progress file should be cleaned up after success" + ); +} From 8defd1a9836e08584d12a7bc622cfaa072becd06 Mon Sep 17 00:00:00 2001 From: Adam Spofford Date: Mon, 2 Feb 2026 11:35:24 -0800 Subject: [PATCH 2/5] fix test --- crates/icp-cli/tests/canister_snapshot_tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/icp-cli/tests/canister_snapshot_tests.rs b/crates/icp-cli/tests/canister_snapshot_tests.rs index 18f158e3..fd8d3222 100644 --- a/crates/icp-cli/tests/canister_snapshot_tests.rs +++ b/crates/icp-cli/tests/canister_snapshot_tests.rs @@ -1165,8 +1165,8 @@ async fn canister_snapshot_upload_resume() { .assert() .success(); - // Start mitmproxy allowing 3 requests: status check + metadata upload + one data chunk - let proxy = MitmproxyGuard::start(real_port, 3); + // Start mitmproxy allowing 2 requests: metadata upload + one data chunk + let proxy = MitmproxyGuard::start(real_port, 2); // Modify the network descriptor to route through mitmproxy let mut modified_descriptor = descriptor.clone(); From 68d68bdb64e16cd3f0c736c82c542271bd6730a4 Mon Sep 17 00:00:00 2001 From: Adam Spofford Date: Tue, 3 Feb 2026 01:49:42 -0800 Subject: [PATCH 3/5] Wait properly for mitmdump --- crates/icp-cli/tests/canister_snapshot_tests.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/crates/icp-cli/tests/canister_snapshot_tests.rs b/crates/icp-cli/tests/canister_snapshot_tests.rs index fd8d3222..ebdbd77b 100644 --- a/crates/icp-cli/tests/canister_snapshot_tests.rs +++ b/crates/icp-cli/tests/canister_snapshot_tests.rs @@ -855,13 +855,18 @@ impl MitmproxyGuard { .spawn() .expect("failed to start mitmproxy - is it installed?"); - // Give mitmproxy time to start - std::thread::sleep(std::time::Duration::from_millis(500)); - - Self { - child, - port: proxy_port, + // Wait for mitmproxy to start listening (up to 5 seconds) + let start = std::time::Instant::now(); + while start.elapsed() < std::time::Duration::from_secs(5) { + if std::net::TcpStream::connect(format!("127.0.0.1:{proxy_port}")).is_ok() { + return Self { + child, + port: proxy_port, + }; + } + std::thread::sleep(std::time::Duration::from_millis(100)); } + panic!("mitmproxy failed to start listening on port {proxy_port} within 5 seconds"); } } From 7825f79ce8423f9799a9bdfd09a71cbbc43b73de Mon Sep 17 00:00:00 2001 From: Adam Spofford Date: Tue, 3 Feb 2026 02:01:22 -0800 Subject: [PATCH 4/5] ubuntu-22.04 has a long outdated mitmproxy version --- .github/scripts/provision-linux-test.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/scripts/provision-linux-test.sh b/.github/scripts/provision-linux-test.sh index 93ed67f5..b896d2af 100755 --- 
a/.github/scripts/provision-linux-test.sh
+++ b/.github/scripts/provision-linux-test.sh
@@ -1,3 +1,4 @@
 #!/bin/bash
 set -euo pipefail
-sudo apt-get update && sudo apt-get install -y softhsm2 mitmproxy
+sudo apt-get update && sudo apt-get install -y softhsm2 pipx
+pipx install mitmproxy

From 4587d0e04c4bf944a09c5cfc3b4b3abe846f0706 Mon Sep 17 00:00:00 2001
From: Adam Spofford
Date: Tue, 3 Feb 2026 02:27:40 -0800
Subject: [PATCH 5/5] lint

---
 .../icp-cli/tests/canister_snapshot_tests.rs  |  2 +-
 docs/reference/cli.md                         | 46 +++++++++++++++++++
 2 files changed, 47 insertions(+), 1 deletion(-)

diff --git a/crates/icp-cli/tests/canister_snapshot_tests.rs b/crates/icp-cli/tests/canister_snapshot_tests.rs
index ebdbd77b..f3eca4b0 100644
--- a/crates/icp-cli/tests/canister_snapshot_tests.rs
+++ b/crates/icp-cli/tests/canister_snapshot_tests.rs
@@ -836,7 +836,7 @@ impl MitmproxyGuard {
             .map(PathBuf::from)
             .unwrap_or_else(|_| PathBuf::from("."))
             .join("tests/assets/limit_transfer.py");
-
+        #[allow(clippy::zombie_processes)]
         let child = std::process::Command::new("mitmdump")
             .args([
                 "--mode",
diff --git a/docs/reference/cli.md b/docs/reference/cli.md
index ad5ed5a6..f5cc8515 100644
--- a/docs/reference/cli.md
+++ b/docs/reference/cli.md
@@ -20,8 +20,10 @@ This document contains the help content for the `icp` command-line program.
 * [`icp canister snapshot`↴](#icp-canister-snapshot)
 * [`icp canister snapshot create`↴](#icp-canister-snapshot-create)
 * [`icp canister snapshot delete`↴](#icp-canister-snapshot-delete)
+* [`icp canister snapshot download`↴](#icp-canister-snapshot-download)
 * [`icp canister snapshot list`↴](#icp-canister-snapshot-list)
 * [`icp canister snapshot restore`↴](#icp-canister-snapshot-restore)
+* [`icp canister snapshot upload`↴](#icp-canister-snapshot-upload)
 * [`icp canister start`↴](#icp-canister-start)
 * [`icp canister status`↴](#icp-canister-status)
 * [`icp canister stop`↴](#icp-canister-stop)
@@ -375,8 +377,10 @@ Commands to manage canister snapshots
 
 * `create` — Create a snapshot of a canister's state
 * `delete` — Delete a canister snapshot
+* `download` — Download a snapshot to local disk
 * `list` — List all snapshots for a canister
 * `restore` — Restore a canister from a snapshot
+* `upload` — Upload a snapshot from local disk
 
 
 
@@ -418,6 +422,27 @@ Delete a canister snapshot
 
 
 
+## `icp canister snapshot download`
+
+Download a snapshot to local disk
+
+**Usage:** `icp canister snapshot download [OPTIONS] --output <OUTPUT> <CANISTER> <SNAPSHOT_ID>`
+
+###### **Arguments:**
+
+* `<CANISTER>` — Name or principal of canister to target. When using a name, an environment must be specified
+* `<SNAPSHOT_ID>` — The snapshot ID to download (hex-encoded)
+
+###### **Options:**
+
+* `-n`, `--network <NETWORK>` — Name of the network to target, conflicts with environment argument
+* `-e`, `--environment <ENVIRONMENT>` — Override the environment to connect to. By default, the local environment is used
+* `--identity <IDENTITY>` — The user identity to run this command as
+* `-o`, `--output <OUTPUT>` — Output directory for the snapshot files
+* `--resume` — Resume a previously interrupted download
+
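+A typical flow looks like the following (illustrative; the canister name,
+snapshot ID, and output directory are placeholders):
+
+```bash
+# Download the snapshot into ./snap
+icp canister snapshot download my-canister 0000000000000000fe01 --output ./snap
+
+# If the transfer is interrupted, rerun with --resume to continue from the
+# saved progress file instead of starting over
+icp canister snapshot download my-canister 0000000000000000fe01 --output ./snap --resume
+```
+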
+
 ## `icp canister snapshot list`
 
 List all snapshots for a canister
@@ -455,6 +480,27 @@ Restore a canister from a snapshot
 
 
 
+## `icp canister snapshot upload`
+
+Upload a snapshot from local disk
+
+**Usage:** `icp canister snapshot upload [OPTIONS] --input <INPUT> <CANISTER>`
+
+###### **Arguments:**
+
+* `<CANISTER>` — Name or principal of canister to target. When using a name, an environment must be specified
+
+###### **Options:**
+
+* `-n`, `--network <NETWORK>` — Name of the network to target, conflicts with environment argument
+* `-e`, `--environment <ENVIRONMENT>` — Override the environment to connect to. By default, the local environment is used
+* `--identity <IDENTITY>` — The user identity to run this command as
+* `-i`, `--input <INPUT>` — Input directory containing the snapshot files
+* `--replace <REPLACE>` — Replace an existing snapshot instead of creating a new one
+* `--resume` — Resume a previously interrupted upload
+
+
+
 ## `icp canister start`
 
 Start a canister on a network