diff --git a/.gitignore b/.gitignore index 1b9cbf1..628b5e4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /target .idea *.hex +/generated-bins/prover.bin diff --git a/Cargo.lock b/Cargo.lock index c3a9f85..4a5aa4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -152,9 +152,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.100" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" [[package]] name = "aquamarine" @@ -958,9 +958,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.56" +version = "4.5.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75ca66430e33a14957acc24c5077b503e7d374151b2b4b3a10c83b4ceb4be0e" +checksum = "6899ea499e3fb9305a65d5ebf6e3d2248c5fab291f300ad0a704fbe142eae31a" dependencies = [ "clap_builder", "clap_derive", @@ -968,9 +968,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.56" +version = "4.5.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793207c7fa6300a0608d1080b858e5fdbe713cdc1c8db9fb17777d8a13e63df0" +checksum = "7b12c8b680195a62a8364d16b8447b01b6c2c8f9aaf68bee653be34d4245e238" dependencies = [ "anstream", "anstyle", @@ -1195,9 +1195,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.7" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1298,9 +1298,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" dependencies = [ "powerfmt", ] @@ -1973,9 +1973,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.7" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" dependencies = [ "typenum", "version_check", @@ -2009,6 +2009,19 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", + "wasip3", +] + [[package]] name = "getrandom_or_panic" version = "0.0.3" @@ -2143,7 +2156,6 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", - "rayon", "serde", ] @@ -2322,14 +2334,13 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ "base64", "bytes", "futures-channel", - "futures-core", "futures-util", "http", "http-body", @@ -2449,6 +2460,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -2543,6 +2560,8 @@ checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", 
"hashbrown 0.16.1", + "serde", + "serde_core", ] [[package]] @@ -2809,7 +2828,6 @@ dependencies = [ "once_cell", "serdect", "sha2 0.10.9", - "signature", ] [[package]] @@ -2847,11 +2865,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" -version = "0.2.180" +version = "0.2.181" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5" [[package]] name = "libm" @@ -3018,9 +3042,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "memory-db" @@ -3071,19 +3095,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "685a9ac4b61f4e728e1d2c6a7844609c16527aeb5e6c865915c08e619c16410f" -[[package]] -name = "nam-tiny-hderive" -version = "0.3.1-nam.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2cd44792ed5cd84dc9dedc3d572242ac00e76c244e85eb4bf34da2c6239ce30" -dependencies = [ - "base58", - "hmac 0.12.1", - "k256", - "sha2 0.10.9", - "zeroize", -] - [[package]] name = "nodrop" version = "0.1.14" @@ -3553,9 +3564,6 @@ name = "plonky2_maybe_rayon" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1e554181dc95243b8d9948ae7bae5759c7fb2502fed28f671f95ef38079406" -dependencies = [ - 
"rayon", -] [[package]] name = "plonky2_util" @@ -3639,9 +3647,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "potential_utf" @@ -3793,8 +3801,7 @@ dependencies = [ [[package]] name = "qp-dilithium-crypto" version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2f13d8793f7a79c42d33e4ebe9d470fe938dc55592ef97ef42d4298aa6a976" +source = "git+https://github.com/Quantus-Network/chain?branch=testnet%2Fplanck#fc80f0359a0d3450151c8fe98fd5f92dab312d6a" dependencies = [ "log", "parity-scale-codec", @@ -3810,13 +3817,12 @@ dependencies = [ [[package]] name = "qp-plonky2" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39530b02faa85964bba211e030afa2d54995b403b0022f88e984c4c65679c4bc" +checksum = "593bccf15b8e2f9eb904ef4010f68b81ddcceb70aaf90116ce29ec09d7578dd4" dependencies = [ "ahash", "anyhow", - "getrandom 0.2.17", "hashbrown 0.14.5", "itertools 0.11.0", "keccak-hash 0.8.0", @@ -3828,21 +3834,47 @@ dependencies = [ "p3-symmetric", "plonky2_maybe_rayon", "plonky2_util", + "qp-plonky2-core", + "qp-plonky2-field", + "qp-plonky2-verifier", + "qp-poseidon-constants", + "rand 0.8.5", + "serde", + "static_assertions", + "unroll", +] + +[[package]] +name = "qp-plonky2-core" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7d30fabfd90e359640f2371c8b3e9b377d215f7dcf4e61da1f38776c5b84540" +dependencies = [ + "ahash", + "anyhow", + "hashbrown 0.14.5", + "itertools 0.11.0", + "keccak-hash 0.8.0", + "log", + "num", + "p3-field", + "p3-goldilocks", + "p3-poseidon2", + "p3-symmetric", + "plonky2_util", "qp-plonky2-field", 
"qp-poseidon-constants", "rand 0.8.5", - "rand_chacha 0.3.1", "serde", "static_assertions", "unroll", - "web-time", ] [[package]] name = "qp-plonky2-field" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8d52dadf3bb92708c309922b62d7f3f2587d3047f9fe05a0c9f587e2890526" +checksum = "20c9f8259bf4f220b1d81001458cc6c09a1372f2b3e8dac2fb489a66230385c3" dependencies = [ "anyhow", "itertools 0.11.0", @@ -3855,6 +3887,32 @@ dependencies = [ "unroll", ] +[[package]] +name = "qp-plonky2-verifier" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0eb89fd3cc40c4b25be95399635957d416406328169ba939db989c0444f364" +dependencies = [ + "ahash", + "anyhow", + "hashbrown 0.14.5", + "itertools 0.11.0", + "keccak-hash 0.8.0", + "log", + "num", + "p3-field", + "p3-goldilocks", + "p3-poseidon2", + "p3-symmetric", + "plonky2_util", + "qp-plonky2-core", + "qp-plonky2-field", + "qp-poseidon-constants", + "serde", + "static_assertions", + "unroll", +] + [[package]] name = "qp-poseidon" version = "1.0.7" @@ -3904,92 +3962,113 @@ dependencies = [ [[package]] name = "qp-rusty-crystals-dilithium" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a42bfb3430fa3bf3f5a148f8132de301876ceb1bdf0891909c2728f044a58" +checksum = "d734438e080d69fa186dac23565dd261fa8146048af00ba8aea7467b429c104b" dependencies = [ - "aes", - "cipher", - "sha2 0.10.9", - "subtle", + "zeroize", ] [[package]] name = "qp-rusty-crystals-hdwallet" -version = "1.0.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fa242963fcd6bc970948b6904f18074673ff89cab8ed0133846a53c69deca7" +checksum = "74d9d8eb6c6a555c831496ab14348a41e4d23aa11943930a568891bf687cd8b1" dependencies = [ "bip39", + "bs58", + "getrandom 0.2.17", "hex", "hex-literal", - "nam-tiny-hderive", + "hmac 0.12.1", + "k256", "qp-poseidon-core", 
"qp-rusty-crystals-dilithium", - "rand_chacha 0.9.0", - "rand_core 0.9.5", "serde", "serde_json", + "sha2 0.10.9", "thiserror 2.0.18", + "zeroize", ] [[package]] name = "qp-wormhole-aggregator" -version = "0.1.8" -source = "git+https://github.com/Quantus-Network/qp-zk-circuits?branch=illuzen%2Fagg-fees#67cf016f6f0237f61dab644d0724124311a15e1f" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bad3d3f37af4748e635f9197b2145cf4d218b97ad361e6b696724e3ddbb4e12a" dependencies = [ "anyhow", + "hex", "qp-plonky2", "qp-wormhole-circuit", + "qp-wormhole-inputs", "qp-wormhole-prover", - "qp-wormhole-verifier", "qp-zk-circuits-common", + "rand 0.8.5", "rayon", - "test-helpers", + "serde", + "serde_json", + "sha2 0.10.9", ] [[package]] name = "qp-wormhole-circuit" -version = "0.1.8" -source = "git+https://github.com/Quantus-Network/qp-zk-circuits?branch=illuzen%2Fagg-fees#67cf016f6f0237f61dab644d0724124311a15e1f" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7cdfba4fd293063a3e9eb964e2afb58673e9a7fd6d4edb0484783e0ed600927" dependencies = [ "anyhow", "hex", "qp-plonky2", + "qp-wormhole-inputs", "qp-zk-circuits-common", ] +[[package]] +name = "qp-wormhole-inputs" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53ad195630b070fc8cd9d89c55a951abaae9694434793bc87f5ab3045ded7108" +dependencies = [ + "anyhow", +] + [[package]] name = "qp-wormhole-prover" -version = "0.1.8" -source = "git+https://github.com/Quantus-Network/qp-zk-circuits?branch=illuzen%2Fagg-fees#67cf016f6f0237f61dab644d0724124311a15e1f" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d244e8514279f65d25f15ed5a6e6464905ac5276724a9233574696e11a461c3a" dependencies = [ "anyhow", "qp-plonky2", "qp-wormhole-circuit", + "qp-wormhole-inputs", "qp-zk-circuits-common", ] [[package]] name = "qp-wormhole-verifier" -version = "0.1.8" 
-source = "git+https://github.com/Quantus-Network/qp-zk-circuits?branch=illuzen%2Fagg-fees#67cf016f6f0237f61dab644d0724124311a15e1f" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9e95153853ceceeba61295ca5f1316d12bde37677b0c1e7f0539d815f627645" dependencies = [ "anyhow", - "qp-plonky2", - "qp-wormhole-circuit", - "qp-zk-circuits-common", + "qp-plonky2-verifier", + "qp-wormhole-inputs", ] [[package]] name = "qp-zk-circuits-common" -version = "0.1.8" -source = "git+https://github.com/Quantus-Network/qp-zk-circuits?branch=illuzen%2Fagg-fees#67cf016f6f0237f61dab644d0724124311a15e1f" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d45c3d80adc2aecbcf27902569d3ec291f5f83e9d7d17ad12530f45102963faa" dependencies = [ "anyhow", "hex", "qp-plonky2", "qp-poseidon-core", + "qp-wormhole-inputs", + "rand 0.8.5", "serde", ] @@ -4019,6 +4098,7 @@ dependencies = [ "qp-rusty-crystals-hdwallet", "qp-wormhole-aggregator", "qp-wormhole-circuit", + "qp-wormhole-inputs", "qp-wormhole-prover", "qp-wormhole-verifier", "qp-zk-circuits-common", @@ -4034,10 +4114,9 @@ dependencies = [ "subxt", "subxt-metadata", "tempfile", - "test-helpers", "thiserror 2.0.18", "tokio", - "toml 0.9.11+spec-1.1.0", + "toml 0.9.12+spec-1.1.0", ] [[package]] @@ -4237,9 +4316,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -4249,9 +4328,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +checksum = 
"6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -4260,9 +4339,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "reqwest" @@ -4476,9 +4555,9 @@ checksum = "e5ff0cc5e135c8870a775d3320910cd9b564ec036b4dc0b8741629020be63f01" [[package]] name = "ryu" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -4974,9 +5053,9 @@ checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" @@ -5886,12 +5965,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.24.0" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" dependencies = [ "fastrand", - "getrandom 0.3.4", + "getrandom 0.4.1", "once_cell", "rustix", "windows-sys 0.61.2", @@ -5906,18 +5985,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "test-helpers" -version = "0.1.8" -source = 
"git+https://github.com/Quantus-Network/qp-zk-circuits?branch=illuzen%2Fagg-fees#67cf016f6f0237f61dab644d0724124311a15e1f" -dependencies = [ - "anyhow", - "hex", - "qp-plonky2", - "qp-wormhole-circuit", - "qp-zk-circuits-common", -] - [[package]] name = "thiserror" version = "1.0.69" @@ -5969,9 +6036,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -5990,9 +6057,9 @@ checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -6109,9 +6176,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.11+spec-1.1.0" +version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ "indexmap", "serde_core", @@ -6168,9 +6235,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.6+spec-1.1.0" +version = "1.0.7+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "247eaa3197818b831697600aadf81514e577e0cba5eab10f7e064e78ae154df1" dependencies = [ "winnow", ] @@ -6409,9 +6476,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" [[package]] name = "unicode-normalization" @@ -6604,6 +6671,15 @@ dependencies = [ "wit-bindgen", ] +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.108" @@ -6663,6 +6739,28 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser 0.244.0", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser 0.244.0", +] + [[package]] name = "wasmi" version = "0.40.0" @@ -6676,7 +6774,7 @@ dependencies = [ "wasmi_collections", "wasmi_core", "wasmi_ir", - "wasmparser", + "wasmparser 0.221.3", ] [[package]] @@ -6713,6 +6811,18 @@ dependencies = [ "bitflags 2.10.0", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags 2.10.0", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + [[package]] name = "web-sys" version = "0.3.85" @@ -6739,23 +6849,23 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.5", + "webpki-root-certs 1.0.6", ] [[package]] name = "webpki-root-certs" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" dependencies = [ "rustls-pki-types", ] [[package]] name = "webpki-roots" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" +checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" dependencies = [ "rustls-pki-types", ] @@ -7095,6 +7205,88 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.114", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 
2.0.114", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags 2.10.0", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser 0.244.0", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser 0.244.0", +] [[package]] name = "writeable" @@ -7154,18 +7346,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.37" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7456cf00f0685ad319c5b1693f291a650eaf345e941d082fc4e03df8a03996ac" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.37" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1328722bbf2115db7e19d69ebcc15e795719e2d66b60827c6a69a117365e37a0" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", @@ -7248,6 +7440,6 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1966f8ac2c1f76987d69a74d0e0f929241c10e78136434e3be70ff7f58f64214" +checksum = "4de98dfa5d5b7fef4ee834d0073d560c9ca7b6c46a71d058c48db7960f8cfaf7" diff --git a/Cargo.toml b/Cargo.toml index 7dfd9b9..14ac8fa 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -49,8 +49,8 @@ rand = "0.9" aes-gcm = "0.10" # AES-256-GCM (quantum-safe with 256-bit keys) # Quantus crypto dependencies -qp-rusty-crystals-dilithium = { version = "2.0.0" } -qp-rusty-crystals-hdwallet = { version = "1.0.0" } +qp-rusty-crystals-dilithium = { version = "2.1.0" } +qp-rusty-crystals-hdwallet = { version = "1.3.0" } qp-poseidon = { version = "1.0.7", features = [ "serde", ] } @@ -79,14 +79,17 @@ subxt-metadata = "0.43.0" # ZK proof generation anyhow = "1.0" -qp-wormhole-circuit = { git = "https://github.com/Quantus-Network/qp-zk-circuits", branch = "illuzen/agg-fees", package = "qp-wormhole-circuit", default-features = false, features = ["std"] } -qp-wormhole-prover = { git = "https://github.com/Quantus-Network/qp-zk-circuits", branch = "illuzen/agg-fees", package = "qp-wormhole-prover", default-features = false, features = ["std"] } -qp-wormhole-verifier = { git = "https://github.com/Quantus-Network/qp-zk-circuits", branch = "illuzen/agg-fees", package = "qp-wormhole-verifier", default-features = false, features = ["std"] } -qp-wormhole-aggregator = { git = "https://github.com/Quantus-Network/qp-zk-circuits", branch = "illuzen/agg-fees", package = "qp-wormhole-aggregator", default-features = false, features = ["rayon", "std"] } -qp-zk-circuits-common = { git = "https://github.com/Quantus-Network/qp-zk-circuits", branch = "illuzen/agg-fees", package = "qp-zk-circuits-common", default-features = false, features = ["std"] } -qp-plonky2 = { version = "1.1.3", default-features = false } +qp-wormhole-circuit = { version = "1.0.7", default-features = false, features = ["std"] } +qp-wormhole-prover = { version = "1.0.7", default-features = false, features = ["std"] } +qp-wormhole-verifier = { version = "1.0.7", default-features = false, features = ["std"] } +qp-wormhole-aggregator = { version = "1.0.7", default-features = false, features = ["rayon", "std"] } +qp-wormhole-inputs = { version = "1.0.7", default-features = false, features = 
["std"] } +qp-zk-circuits-common = { version = "1.0.7", default-features = false, features = ["std"] } +qp-plonky2 = { version = "1.1.3", default-features = false, features = ["rand", "std"] } [dev-dependencies] tempfile = "3.8" serial_test = "3.1" -qp-wormhole-test-helpers = { git = "https://github.com/Quantus-Network/qp-zk-circuits", branch = "illuzen/agg-fees", package = "test-helpers" } + +[patch.crates-io] +qp-dilithium-crypto = { git = "https://github.com/Quantus-Network/chain", branch = "testnet/planck" } diff --git a/README.md b/README.md index 8512f11..8c86085 100644 --- a/README.md +++ b/README.md @@ -119,6 +119,347 @@ Common navigation patterns: +## Command Reference + +### Wormhole (Privacy-Preserving Transfers) + +The `wormhole` commands implement a ZK-proof-based privacy layer. Funds are sent to an unspendable account derived from a secret, a zero-knowledge proof is generated to prove ownership, and the proof is verified on-chain to mint equivalent tokens to an exit account -- breaking the on-chain link between sender and receiver. + +#### `quantus wormhole address` + +Derive the unspendable wormhole address from a secret. This is step one of a private transfer -- it shows the address you need to send funds to. + +```bash +quantus wormhole address --secret 0x<64-hex-chars> +``` + +Output: +``` +Wormhole Address + SS58: qDx... + Hex: 0x... + +To fund this address: + quantus send --from --to qDx... --amount +``` + +Then send funds using a standard transfer (the chain's `WormholeProofRecorderExtension` automatically records a transfer proof for any balance transfer): + +```bash +quantus send --from crystal_alice --to qDx... --amount 100 +``` + +#### `quantus wormhole prove` + +Generate a ZK proof for an existing wormhole transfer. The proof demonstrates knowledge of the secret without revealing it. 
+ +```bash +quantus wormhole prove \ + --secret 0x<secret-hex> \ + --amount 100000000000000 \ + --exit-account <address> \ + --block 0x<block-hash> \ + --transfer-count <count> \ + --funding-account 0x<account-hex> \ + --output proof.hex +``` + +- `--exit-account`: The destination address that will receive funds after on-chain verification (SS58 or `0x`-prefixed hex). +- `--block`: Block hash where the transfer was included. +- `--transfer-count`: Transfer count from the `NativeTransferred` event. +- `--output`: Output file path for the hex-encoded proof (default: `proof.hex`). + +#### `quantus wormhole aggregate` + +Aggregate multiple leaf proofs into a single recursive proof. The aggregation circuit pads with dummy proofs and shuffles to hide which slots are real. + +```bash +quantus wormhole aggregate \ + --proofs proof_1.hex proof_2.hex \ + --output aggregated_proof.hex +``` + +- `--proofs`: One or more hex-encoded proof files. The number must not exceed `num_leaf_proofs` from the circuit config. +- Before aggregation, the CLI verifies binary hashes from `generated-bins/config.json` to detect stale circuit binaries. +- Displays timing for dummy proof generation and aggregation separately. + +#### `quantus wormhole verify-aggregated` + +Submit an aggregated proof to the chain for on-chain verification. This is an unsigned extrinsic -- no wallet is needed. + +```bash +quantus wormhole verify-aggregated --proof aggregated_proof.hex +``` + +- On success, the chain mints tokens to each exit account listed in the proof. +- The command checks for `ProofVerified` and `ExtrinsicFailed` events and reports the result. + +#### `quantus wormhole parse-proof` + +Inspect the public inputs of a proof file for debugging.
+ +```bash +# Parse a leaf proof +quantus wormhole parse-proof --proof proof.hex + +# Parse an aggregated proof +quantus wormhole parse-proof --proof aggregated_proof.hex --aggregated + +# Parse and cryptographically verify locally +quantus wormhole parse-proof --proof aggregated_proof.hex --aggregated --verify +``` + +#### `quantus wormhole multiround` + +Run an automated multi-round wormhole flow: fund -> prove -> aggregate -> verify on-chain, repeated over multiple rounds. This is the primary integration test for the wormhole system. + +```bash +quantus wormhole multiround \ + --num-proofs 4 \ + --rounds 2 \ + --amount 100000000000000 \ + --wallet crystal_alice \ + --password "" \ + --keep-files \ + --output-dir /tmp/wormhole_test +``` + +- `--num-proofs`: Number of proofs per round (1 to `num_leaf_proofs` from circuit config, default: 2). +- `--rounds`: Number of rounds (default: 2). In intermediate rounds, exit accounts are the next round's wormhole addresses; in the final round, funds exit back to the wallet. +- `--amount`: Total amount in planck to randomly partition across proofs (default: 100 DEV). +- `--wallet`: Wallet name for funding (round 1) and final exit. +- `--keep-files`: Preserve proof files after completion (default: cleaned up). +- `--output-dir`: Directory for intermediate proof files (default: `/tmp/wormhole_multiround`). +- `--dry-run`: Show configuration and derived addresses without executing. + +Each round performs: +1. **Transfer** (round 1 only): Randomly partition the total amount and send to wormhole addresses derived via HD path `m/44'/189189189'/0'/<round>'/<index>'`. +2. **Generate proofs**: Create a ZK proof for each transfer with randomized dual-output assignments. +3. **Aggregate**: Combine all leaf proofs into a single recursive proof. +4. **Verify on-chain**: Submit the aggregated proof; the chain mints tokens to exit accounts. + +After all rounds, the command verifies the wallet balance matches expectations (initial - fees).
+ +--- + +### Developer Tools + +#### `quantus developer build-circuits` + +Build ZK circuit binaries from the `qp-zk-circuits` repository, then copy them to the CLI and chain directories. This is required whenever the circuit logic changes. + +```bash +quantus developer build-circuits \ + --branching-factor 2 \ + --depth 1 \ + --circuits-path ../qp-zk-circuits \ + --chain-path ../chain +``` + +- `--branching-factor`: Number of proofs aggregated at each tree level. +- `--depth`: Depth of the aggregation tree. Total leaf proofs = `branching_factor ^ depth`. +- `--circuits-path`: Path to the `qp-zk-circuits` repo (default: `../qp-zk-circuits`). +- `--chain-path`: Path to the chain repo (default: `../chain`). +- `--skip-chain`: Skip copying binaries to the chain directory. + +**What it does (4 steps):** +1. Builds the `qp-wormhole-circuit-builder` binary. +2. Runs the circuit builder to generate binary files in `generated-bins/` (includes `prover.bin`, `verifier.bin`, `common.bin`, `aggregated_verifier.bin`, `aggregated_common.bin`, `config.json` with SHA256 hashes). +3. Copies binaries to the CLI's `generated-bins/` directory and touches the aggregator source to force recompilation. +4. Copies chain-relevant binaries (`aggregated_common.bin`, `aggregated_verifier.bin`, `config.json`) to `chain/pallets/wormhole/` and touches the pallet source. + +After running, rebuild the chain (`cargo build --release` in the chain directory) so `include_bytes!()` picks up the new binaries. + +#### `quantus developer create-test-wallets` + +Create standard test wallets (`crystal_alice`, `crystal_bob`, `crystal_charlie`) with developer passwords for local testing. 
+ +```bash +quantus developer create-test-wallets +``` + +--- + +### Wallet Management + +```bash +# Create a new quantum-safe wallet +quantus wallet create --name my_wallet + +# Create with explicit derivation path +quantus wallet create --name my_wallet --derivation-path "m/44'/189189'/0'/0/0" + +# Import from mnemonic +quantus wallet import --name recovered_wallet --mnemonic "word1 word2 ... word24" + +# Create from raw 32-byte seed +quantus wallet from-seed --name raw_wallet --seed <64-hex-chars> + +# List wallets +quantus wallet list + +# View wallet details +quantus wallet view --name my_wallet + +# Export mnemonic +quantus wallet export --name my_wallet --format mnemonic +``` + +--- + +### Sending Tokens + +```bash +# Simple transfer +quantus send --from crystal_alice --to
<address> --amount 10.5 + +# With tip for priority +quantus send --from crystal_alice --to <address>
--amount 10 --tip 0.1 + +# With manual nonce +quantus send --from crystal_alice --to <address>
--amount 10 --nonce 42 +``` + +--- + +### Batch Transfers + +```bash +# From a JSON file +quantus batch send --from crystal_alice --batch-file transfers.json + +# Generate identical test transfers +quantus batch send --from crystal_alice --count 10 --to <address>
--amount 1.0 + +# Check batch limits +quantus batch config --limits +``` + +--- + +### Reversible Transfers + +Schedule transfers with a time delay, allowing cancellation before execution. + +```bash +# Schedule with default delay +quantus reversible schedule-transfer --from alice --to bob --amount 10 + +# Schedule with custom delay +quantus reversible schedule-transfer-with-delay --from alice --to bob --amount 10 --delay 3600 + +# Cancel a pending transfer +quantus reversible cancel --tx-id 0x<hash> --from alice +``` + +--- + +### High-Security Mode + +Configure reversibility settings for an account (interceptor + delay). + +```bash +# Check status +quantus high-security status --account <address>
+ +# Enable high-security with an interceptor +quantus high-security set --interceptor <address>
--delay-seconds 3600 --from alice + +# Show accounts you guard +quantus high-security entrusted --from alice +``` + +--- + +### Account Recovery + +Social recovery using trusted friends. + +```bash +# Initiate recovery +quantus recovery initiate --rescuer bob --lost alice + +# Friend vouches +quantus recovery vouch --friend charlie --lost alice --rescuer bob + +# Claim after threshold met +quantus recovery claim --rescuer bob --lost alice +``` + +--- + +### Treasury + +```bash +# Check treasury balance +quantus treasury balance + +# Submit a spend proposal +quantus treasury submit-spend --beneficiary <address>
--amount 100.0 --track small --from alice + +# Payout an approved spend +quantus treasury payout --index 0 --from alice +``` + +--- + +### Privacy-Preserving Transfer Queries + +Query transfers via a Subsquid indexer using hash-prefix queries that hide your exact address. + +```bash +quantus transfers query \ + --subsquid-url https://indexer.quantus.com/graphql \ + --prefix-len 4 \ + --wallet my_wallet +``` + +--- + +### Block Analysis + +```bash +# Analyze a specific block +quantus block analyze --number 1234 --all + +# Analyze latest block +quantus block analyze --latest --extrinsics --events + +# List blocks in a range +quantus block list --start 100 --end 110 +``` + +--- + +### Generic Pallet Calls + +Call any pallet function using metadata-driven parsing: + +```bash +quantus call \ + --pallet Balances \ + --call transfer_allow_death \ + --args '["5GrwvaEF...", "1000000000000"]' \ + --from crystal_alice +``` + +--- + +### Other Commands + +| Command | Description | +|---------|-------------| +| `quantus balance --address ` | Query account balance | +| `quantus events --block 123` | Query events from a block | +| `quantus events --finalized` | Events from the latest finalized block | +| `quantus system` | System information | +| `quantus system --runtime` | Runtime version details | +| `quantus metadata --pallet Balances` | Explore chain metadata | +| `quantus version` | CLI version | +| `quantus compatibility-check` | Check CLI/node compatibility | + +--- + ## 🔧 Environment Variables ### Password Management diff --git a/generated-bins/aggregated_common.bin b/generated-bins/aggregated_common.bin new file mode 100644 index 0000000..1121006 Binary files /dev/null and b/generated-bins/aggregated_common.bin differ diff --git a/generated-bins/aggregated_verifier.bin b/generated-bins/aggregated_verifier.bin new file mode 100644 index 0000000..3ecbc8d Binary files /dev/null and b/generated-bins/aggregated_verifier.bin differ diff --git a/generated-bins/common.bin 
b/generated-bins/common.bin new file mode 100644 index 0000000..15d8a64 Binary files /dev/null and b/generated-bins/common.bin differ diff --git a/generated-bins/config.json b/generated-bins/config.json new file mode 100644 index 0000000..6e09f7b --- /dev/null +++ b/generated-bins/config.json @@ -0,0 +1,12 @@ +{ + "branching_factor": 16, + "depth": 1, + "num_leaf_proofs": 16, + "hashes": { + "common": "672689a87e8ed780337c0752ebc7fd1db6a63611fbd59b4ad0cbe4a4d97edcf2", + "verifier": "bb017485b12fb9c6d0b5c3db8b68f417bd3f75b2d5f3a2ea5fe12b6244233372", + "prover": "78c114c7290b04bac00551a590fd652f98194653b10ac4e11b0c0ddd5c7c0976", + "aggregated_common": "af4461081f6fb527d2b9ffb74479a133ed8b92cdd3554b46adc481a0dfc38b5d", + "aggregated_verifier": "9895c3f86682fa59cb18b7e635f0e8ba563c8445bf4b58bb77b97dcf52ee5c42" + } +} \ No newline at end of file diff --git a/generated-bins/verifier.bin b/generated-bins/verifier.bin new file mode 100644 index 0000000..21c3030 Binary files /dev/null and b/generated-bins/verifier.bin differ diff --git a/src/chain/quantus_subxt.rs b/src/chain/quantus_subxt.rs index 64fa891..0651a80 100644 --- a/src/chain/quantus_subxt.rs +++ b/src/chain/quantus_subxt.rs @@ -1470,10 +1470,9 @@ pub mod api { "query_call_info", types::QueryCallInfo { call, len }, [ - 250u8, 181u8, 79u8, 69u8, 249u8, 197u8, 196u8, 8u8, 168u8, 127u8, - 210u8, 175u8, 103u8, 16u8, 174u8, 81u8, 30u8, 178u8, 138u8, 169u8, - 15u8, 210u8, 84u8, 187u8, 149u8, 1u8, 153u8, 79u8, 109u8, 176u8, 75u8, - 142u8, + 204u8, 150u8, 141u8, 3u8, 172u8, 39u8, 127u8, 54u8, 249u8, 96u8, 163u8, + 158u8, 93u8, 236u8, 159u8, 71u8, 49u8, 22u8, 104u8, 202u8, 3u8, 96u8, + 247u8, 91u8, 244u8, 94u8, 201u8, 162u8, 142u8, 28u8, 197u8, 142u8, ], ) } @@ -1491,10 +1490,9 @@ pub mod api { "query_call_fee_details", types::QueryCallFeeDetails { call, len }, [ - 165u8, 10u8, 175u8, 229u8, 131u8, 61u8, 188u8, 169u8, 171u8, 236u8, - 15u8, 93u8, 33u8, 236u8, 188u8, 179u8, 84u8, 34u8, 34u8, 202u8, 173u8, - 166u8, 184u8, 
72u8, 170u8, 64u8, 228u8, 233u8, 114u8, 200u8, 46u8, - 50u8, + 188u8, 8u8, 21u8, 155u8, 112u8, 74u8, 100u8, 5u8, 115u8, 144u8, 213u8, + 217u8, 106u8, 97u8, 48u8, 45u8, 93u8, 58u8, 101u8, 97u8, 226u8, 204u8, + 167u8, 167u8, 138u8, 151u8, 24u8, 106u8, 149u8, 11u8, 55u8, 170u8, ], ) } @@ -1996,9 +1994,9 @@ pub mod api { .hash(); runtime_metadata_hash == [ - 177u8, 66u8, 174u8, 27u8, 79u8, 228u8, 183u8, 89u8, 222u8, 162u8, 125u8, 17u8, - 90u8, 49u8, 152u8, 123u8, 131u8, 171u8, 166u8, 110u8, 79u8, 145u8, 199u8, 102u8, - 182u8, 33u8, 169u8, 66u8, 202u8, 150u8, 26u8, 128u8, + 60u8, 255u8, 4u8, 208u8, 41u8, 252u8, 232u8, 254u8, 146u8, 6u8, 174u8, 75u8, 170u8, + 225u8, 3u8, 217u8, 68u8, 231u8, 231u8, 227u8, 166u8, 46u8, 206u8, 236u8, 155u8, + 201u8, 2u8, 240u8, 72u8, 62u8, 125u8, 106u8, ] } pub mod system { @@ -3097,9 +3095,9 @@ pub mod api { "Events", (), [ - 174u8, 35u8, 207u8, 216u8, 55u8, 49u8, 233u8, 105u8, 0u8, 227u8, 170u8, - 238u8, 94u8, 63u8, 33u8, 202u8, 193u8, 145u8, 123u8, 31u8, 174u8, 0u8, - 73u8, 157u8, 169u8, 193u8, 4u8, 38u8, 37u8, 73u8, 107u8, 227u8, + 137u8, 200u8, 171u8, 24u8, 189u8, 55u8, 241u8, 103u8, 215u8, 54u8, + 98u8, 177u8, 217u8, 184u8, 55u8, 205u8, 187u8, 36u8, 71u8, 136u8, 71u8, + 63u8, 1u8, 7u8, 221u8, 213u8, 113u8, 189u8, 226u8, 251u8, 216u8, 172u8, ], ) } @@ -5379,9 +5377,10 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 13u8, 106u8, 188u8, 217u8, 128u8, 85u8, 178u8, 204u8, 154u8, 43u8, 7u8, - 229u8, 104u8, 115u8, 160u8, 200u8, 121u8, 209u8, 37u8, 2u8, 180u8, - 151u8, 26u8, 20u8, 35u8, 107u8, 3u8, 206u8, 218u8, 83u8, 42u8, 126u8, + 139u8, 70u8, 66u8, 133u8, 103u8, 122u8, 116u8, 79u8, 33u8, 114u8, + 121u8, 140u8, 45u8, 159u8, 190u8, 20u8, 82u8, 18u8, 149u8, 237u8, + 153u8, 207u8, 254u8, 49u8, 229u8, 147u8, 158u8, 47u8, 98u8, 221u8, + 64u8, 208u8, ], ) } @@ -5404,10 +5403,9 @@ pub mod api { weight, }, [ - 159u8, 52u8, 187u8, 160u8, 94u8, 11u8, 173u8, 224u8, 208u8, 187u8, - 69u8, 124u8, 172u8, 141u8, 23u8, 
171u8, 156u8, 31u8, 62u8, 241u8, 41u8, - 143u8, 164u8, 88u8, 202u8, 14u8, 56u8, 203u8, 227u8, 146u8, 211u8, - 206u8, + 161u8, 255u8, 101u8, 97u8, 2u8, 224u8, 1u8, 98u8, 65u8, 47u8, 40u8, + 73u8, 58u8, 241u8, 87u8, 27u8, 49u8, 27u8, 58u8, 156u8, 63u8, 175u8, + 215u8, 152u8, 16u8, 46u8, 118u8, 155u8, 93u8, 41u8, 165u8, 173u8, ], ) } @@ -5445,9 +5443,10 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 147u8, 92u8, 106u8, 226u8, 230u8, 190u8, 231u8, 120u8, 60u8, 7u8, 78u8, - 39u8, 18u8, 11u8, 73u8, 140u8, 77u8, 96u8, 34u8, 187u8, 66u8, 35u8, - 233u8, 191u8, 6u8, 238u8, 16u8, 8u8, 41u8, 74u8, 70u8, 20u8, + 29u8, 143u8, 178u8, 190u8, 95u8, 145u8, 17u8, 240u8, 94u8, 218u8, 89u8, + 233u8, 194u8, 39u8, 196u8, 247u8, 210u8, 165u8, 71u8, 62u8, 164u8, + 175u8, 42u8, 179u8, 137u8, 187u8, 177u8, 10u8, 248u8, 157u8, 91u8, + 93u8, ], ) } @@ -6936,10 +6935,10 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 191u8, 229u8, 248u8, 28u8, 11u8, 18u8, 203u8, 70u8, 140u8, 195u8, - 148u8, 253u8, 106u8, 18u8, 75u8, 66u8, 59u8, 171u8, 5u8, 137u8, 253u8, - 245u8, 192u8, 92u8, 29u8, 127u8, 184u8, 162u8, 19u8, 85u8, 250u8, - 209u8, + 102u8, 103u8, 19u8, 115u8, 19u8, 43u8, 141u8, 147u8, 145u8, 130u8, + 118u8, 205u8, 116u8, 21u8, 169u8, 245u8, 91u8, 195u8, 69u8, 49u8, 11u8, + 189u8, 250u8, 142u8, 248u8, 124u8, 135u8, 217u8, 78u8, 252u8, 36u8, + 226u8, ], ) } @@ -6981,10 +6980,10 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 68u8, 70u8, 175u8, 198u8, 252u8, 36u8, 50u8, 142u8, 175u8, 122u8, - 246u8, 206u8, 72u8, 170u8, 126u8, 170u8, 61u8, 65u8, 90u8, 21u8, 109u8, - 162u8, 210u8, 92u8, 132u8, 83u8, 168u8, 35u8, 254u8, 206u8, 58u8, - 223u8, + 138u8, 207u8, 16u8, 247u8, 167u8, 103u8, 166u8, 249u8, 84u8, 160u8, + 115u8, 250u8, 241u8, 153u8, 56u8, 116u8, 81u8, 181u8, 152u8, 239u8, + 95u8, 135u8, 69u8, 165u8, 228u8, 216u8, 227u8, 71u8, 157u8, 114u8, + 117u8, 192u8, ], ) } @@ -7023,10 +7022,9 
@@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 47u8, 186u8, 65u8, 172u8, 93u8, 228u8, 73u8, 229u8, 119u8, 42u8, 132u8, - 251u8, 125u8, 177u8, 249u8, 193u8, 3u8, 197u8, 140u8, 107u8, 128u8, - 194u8, 194u8, 246u8, 85u8, 102u8, 219u8, 26u8, 218u8, 161u8, 254u8, - 50u8, + 229u8, 249u8, 88u8, 250u8, 31u8, 188u8, 238u8, 42u8, 94u8, 153u8, 29u8, + 70u8, 199u8, 95u8, 46u8, 113u8, 204u8, 236u8, 225u8, 200u8, 34u8, 55u8, + 82u8, 32u8, 14u8, 154u8, 73u8, 175u8, 205u8, 6u8, 165u8, 72u8, ], ) } @@ -7051,9 +7049,9 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 156u8, 38u8, 51u8, 6u8, 248u8, 180u8, 212u8, 22u8, 238u8, 52u8, 244u8, - 139u8, 49u8, 1u8, 30u8, 92u8, 218u8, 100u8, 104u8, 68u8, 159u8, 165u8, - 167u8, 252u8, 173u8, 213u8, 238u8, 145u8, 28u8, 205u8, 181u8, 114u8, + 171u8, 36u8, 101u8, 188u8, 133u8, 123u8, 205u8, 135u8, 23u8, 2u8, 7u8, + 96u8, 10u8, 38u8, 102u8, 157u8, 170u8, 113u8, 223u8, 3u8, 144u8, 47u8, + 168u8, 57u8, 167u8, 12u8, 195u8, 66u8, 158u8, 250u8, 66u8, 157u8, ], ) } @@ -8081,9 +8079,10 @@ pub mod api { "batch", types::Batch { calls }, [ - 111u8, 39u8, 31u8, 82u8, 11u8, 56u8, 103u8, 165u8, 86u8, 35u8, 120u8, - 94u8, 247u8, 221u8, 146u8, 132u8, 251u8, 249u8, 233u8, 119u8, 137u8, - 221u8, 171u8, 224u8, 9u8, 250u8, 247u8, 73u8, 138u8, 20u8, 100u8, 95u8, + 111u8, 193u8, 119u8, 118u8, 37u8, 86u8, 134u8, 214u8, 148u8, 113u8, + 255u8, 207u8, 186u8, 6u8, 247u8, 84u8, 23u8, 246u8, 144u8, 242u8, + 114u8, 20u8, 105u8, 119u8, 137u8, 247u8, 189u8, 240u8, 237u8, 176u8, + 71u8, 249u8, ], ) } @@ -8113,10 +8112,9 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 249u8, 108u8, 205u8, 73u8, 199u8, 132u8, 64u8, 242u8, 220u8, 193u8, - 76u8, 246u8, 60u8, 28u8, 56u8, 16u8, 170u8, 136u8, 43u8, 176u8, 107u8, - 135u8, 188u8, 183u8, 32u8, 77u8, 139u8, 45u8, 255u8, 47u8, 108u8, - 177u8, + 50u8, 167u8, 42u8, 97u8, 124u8, 144u8, 249u8, 155u8, 195u8, 246u8, + 155u8, 91u8, 
32u8, 182u8, 54u8, 42u8, 176u8, 178u8, 66u8, 99u8, 46u8, + 182u8, 8u8, 242u8, 233u8, 63u8, 68u8, 60u8, 204u8, 60u8, 49u8, 112u8, ], ) } @@ -8142,10 +8140,9 @@ pub mod api { "batch_all", types::BatchAll { calls }, [ - 78u8, 221u8, 198u8, 146u8, 146u8, 235u8, 29u8, 92u8, 217u8, 91u8, - 147u8, 60u8, 69u8, 132u8, 17u8, 222u8, 71u8, 96u8, 58u8, 161u8, 138u8, - 149u8, 156u8, 28u8, 203u8, 113u8, 194u8, 217u8, 223u8, 155u8, 106u8, - 71u8, + 252u8, 233u8, 123u8, 196u8, 206u8, 108u8, 105u8, 36u8, 203u8, 174u8, + 73u8, 179u8, 76u8, 42u8, 56u8, 144u8, 167u8, 61u8, 204u8, 87u8, 132u8, + 57u8, 191u8, 199u8, 63u8, 71u8, 132u8, 174u8, 54u8, 230u8, 21u8, 199u8, ], ) } @@ -8168,9 +8165,10 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 165u8, 231u8, 237u8, 221u8, 24u8, 29u8, 11u8, 140u8, 123u8, 34u8, 72u8, - 188u8, 90u8, 117u8, 151u8, 120u8, 117u8, 2u8, 191u8, 151u8, 130u8, - 93u8, 25u8, 173u8, 63u8, 52u8, 205u8, 87u8, 235u8, 226u8, 207u8, 222u8, + 243u8, 223u8, 67u8, 77u8, 222u8, 227u8, 101u8, 159u8, 246u8, 109u8, + 238u8, 21u8, 135u8, 5u8, 26u8, 29u8, 49u8, 17u8, 29u8, 167u8, 76u8, + 84u8, 227u8, 151u8, 234u8, 214u8, 35u8, 234u8, 129u8, 163u8, 181u8, + 149u8, ], ) } @@ -8196,10 +8194,9 @@ pub mod api { "force_batch", types::ForceBatch { calls }, [ - 162u8, 23u8, 181u8, 133u8, 240u8, 173u8, 61u8, 116u8, 183u8, 60u8, - 132u8, 126u8, 255u8, 27u8, 43u8, 229u8, 251u8, 234u8, 17u8, 240u8, - 253u8, 193u8, 75u8, 9u8, 195u8, 104u8, 151u8, 203u8, 82u8, 128u8, - 151u8, 194u8, + 13u8, 145u8, 1u8, 24u8, 146u8, 209u8, 200u8, 218u8, 24u8, 166u8, 190u8, + 203u8, 29u8, 162u8, 219u8, 181u8, 35u8, 237u8, 96u8, 196u8, 199u8, + 85u8, 173u8, 24u8, 184u8, 12u8, 148u8, 51u8, 14u8, 105u8, 131u8, 132u8, ], ) } @@ -8222,10 +8219,10 @@ pub mod api { weight, }, [ - 171u8, 121u8, 3u8, 182u8, 182u8, 104u8, 23u8, 118u8, 127u8, 89u8, - 108u8, 88u8, 24u8, 108u8, 99u8, 178u8, 183u8, 201u8, 110u8, 214u8, - 98u8, 78u8, 16u8, 149u8, 17u8, 130u8, 106u8, 68u8, 128u8, 255u8, 56u8, - 
54u8, + 111u8, 148u8, 45u8, 217u8, 132u8, 185u8, 150u8, 232u8, 91u8, 77u8, + 142u8, 98u8, 175u8, 13u8, 252u8, 220u8, 199u8, 177u8, 171u8, 155u8, + 84u8, 242u8, 40u8, 132u8, 77u8, 201u8, 7u8, 25u8, 102u8, 169u8, 235u8, + 6u8, ], ) } @@ -8265,9 +8262,9 @@ pub mod api { fallback: ::subxt::ext::subxt_core::alloc::boxed::Box::new(fallback), }, [ - 232u8, 191u8, 10u8, 242u8, 23u8, 164u8, 0u8, 128u8, 50u8, 227u8, 113u8, - 190u8, 172u8, 153u8, 223u8, 24u8, 31u8, 107u8, 172u8, 175u8, 157u8, - 13u8, 81u8, 196u8, 117u8, 202u8, 249u8, 20u8, 187u8, 2u8, 145u8, 204u8, + 130u8, 245u8, 121u8, 31u8, 205u8, 1u8, 6u8, 43u8, 62u8, 146u8, 200u8, + 61u8, 223u8, 162u8, 42u8, 243u8, 202u8, 252u8, 66u8, 155u8, 227u8, + 70u8, 211u8, 133u8, 15u8, 65u8, 72u8, 47u8, 175u8, 127u8, 80u8, 88u8, ], ) } @@ -8290,9 +8287,9 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 21u8, 52u8, 11u8, 8u8, 36u8, 3u8, 245u8, 168u8, 231u8, 34u8, 214u8, - 36u8, 127u8, 82u8, 55u8, 53u8, 161u8, 38u8, 171u8, 208u8, 133u8, 239u8, - 183u8, 54u8, 207u8, 101u8, 244u8, 110u8, 150u8, 221u8, 86u8, 234u8, + 169u8, 9u8, 148u8, 133u8, 139u8, 233u8, 123u8, 12u8, 26u8, 40u8, 84u8, + 195u8, 239u8, 201u8, 104u8, 122u8, 9u8, 46u8, 249u8, 206u8, 220u8, + 186u8, 225u8, 142u8, 158u8, 10u8, 204u8, 24u8, 30u8, 246u8, 88u8, 15u8, ], ) } @@ -15324,10 +15321,10 @@ pub mod api { call: ::subxt::ext::subxt_core::alloc::boxed::Box::new(call), }, [ - 126u8, 156u8, 236u8, 234u8, 175u8, 29u8, 163u8, 235u8, 36u8, 69u8, - 66u8, 210u8, 103u8, 127u8, 94u8, 104u8, 170u8, 113u8, 150u8, 151u8, - 236u8, 43u8, 243u8, 172u8, 194u8, 209u8, 6u8, 137u8, 147u8, 128u8, - 113u8, 236u8, + 125u8, 10u8, 181u8, 50u8, 177u8, 114u8, 92u8, 100u8, 221u8, 17u8, 19u8, + 106u8, 84u8, 156u8, 189u8, 217u8, 223u8, 233u8, 245u8, 200u8, 8u8, + 24u8, 21u8, 189u8, 205u8, 170u8, 119u8, 109u8, 156u8, 192u8, 139u8, + 202u8, ], ) } @@ -19885,17 +19882,25 @@ pub mod api { #[encode_as_type( crate_path = ":: subxt :: ext :: subxt_core :: ext :: 
scale_encode" )] - #[doc = "Create a new multisig account"] + #[doc = "Create a new multisig account with deterministic address"] #[doc = ""] #[doc = "Parameters:"] #[doc = "- `signers`: List of accounts that can sign for this multisig"] #[doc = "- `threshold`: Number of approvals required to execute transactions"] + #[doc = "- `nonce`: User-provided nonce for address uniqueness"] + #[doc = ""] + #[doc = "The multisig address is deterministically derived from:"] + #[doc = "hash(pallet_id || sorted_signers || threshold || nonce)"] + #[doc = ""] + #[doc = "Signers are automatically sorted before hashing, so order doesn't matter."] #[doc = ""] - #[doc = "The multisig address is derived from a hash of all signers + global nonce."] - #[doc = "The creator must pay a non-refundable fee (burned)."] + #[doc = "Economic costs:"] + #[doc = "- MultisigFee: burned immediately (spam prevention)"] + #[doc = "- MultisigDeposit: reserved until dissolution, then returned to creator (storage bond)"] pub struct CreateMultisig { pub signers: create_multisig::Signers, pub threshold: create_multisig::Threshold, + pub nonce: create_multisig::Nonce, } pub mod create_multisig { use super::runtime_types; @@ -19903,6 +19908,7 @@ pub mod api { ::subxt::ext::subxt_core::utils::AccountId32, >; pub type Threshold = ::core::primitive::u32; + pub type Nonce = ::core::primitive::u64; } impl ::subxt::ext::subxt_core::blocks::StaticExtrinsic for CreateMultisig { const PALLET: &'static str = "Multisig"; @@ -19930,11 +19936,13 @@ pub mod api { #[doc = "- A deposit (refundable - returned immediately on execution/cancellation)"] #[doc = "- A fee (non-refundable, burned immediately)"] #[doc = ""] - #[doc = "**Auto-cleanup:** Before creating a new proposal, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers. 
This is the primary"] - #[doc = "cleanup mechanism."] + #[doc = "**Auto-cleanup:** Before creating a new proposal, ALL proposer's expired"] + #[doc = "proposals are automatically removed. This is the primary cleanup mechanism."] #[doc = ""] #[doc = "**For threshold=1:** If the multisig threshold is 1, the proposal executes immediately."] + #[doc = ""] + #[doc = "**Weight:** Charged upfront for worst-case (high-security path with decode)."] + #[doc = "Refunded to actual cost on success based on whether HS path was taken."] pub struct Propose { pub multisig_address: propose::MultisigAddress, pub call: propose::Call, @@ -19965,16 +19973,13 @@ pub mod api { #[doc = "Approve a proposed transaction"] #[doc = ""] #[doc = "If this approval brings the total approvals to or above the threshold,"] - #[doc = "the transaction will be automatically executed."] - #[doc = ""] - #[doc = "**Auto-cleanup:** Before processing the approval, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers."] + #[doc = "the proposal status changes to `Approved` and can be executed via `execute()`."] #[doc = ""] #[doc = "Parameters:"] #[doc = "- `multisig_address`: The multisig account"] #[doc = "- `proposal_id`: ID (nonce) of the proposal to approve"] #[doc = ""] - #[doc = "Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual"] + #[doc = "Weight: Charges for MAX call size, refunds based on actual"] pub struct Approve { pub multisig_address: approve::MultisigAddress, pub proposal_id: approve::ProposalId, @@ -20001,14 +20006,9 @@ pub mod api { )] #[doc = "Cancel a proposed transaction (only by proposer)"] #[doc = ""] - #[doc = "**Auto-cleanup:** Before processing the cancellation, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers."] - #[doc = ""] #[doc = "Parameters:"] #[doc = "- `multisig_address`: The multisig account"] #[doc = "- `proposal_id`: ID (nonce) of the 
proposal to cancel"] - #[doc = ""] - #[doc = "Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual"] pub struct Cancel { pub multisig_address: cancel::MultisigAddress, pub proposal_id: cancel::ProposalId, @@ -20097,50 +20097,101 @@ pub mod api { #[encode_as_type( crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode" )] - #[doc = "Dissolve (remove) a multisig and recover the creation deposit."] + #[doc = "Execute an approved proposal"] + #[doc = ""] + #[doc = "Can be called by any signer of the multisig once the proposal has reached"] + #[doc = "the approval threshold (status = Approved). The proposal must not be expired."] + #[doc = ""] + #[doc = "On execution:"] + #[doc = "- The call is decoded and dispatched as the multisig account"] + #[doc = "- Proposal is removed from storage"] + #[doc = "- Deposit is returned to the proposer"] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `multisig_address`: The multisig account"] + #[doc = "- `proposal_id`: ID (nonce) of the proposal to execute"] + pub struct Execute { + pub multisig_address: execute::MultisigAddress, + pub proposal_id: execute::ProposalId, + } + pub mod execute { + use super::runtime_types; + pub type MultisigAddress = ::subxt::ext::subxt_core::utils::AccountId32; + pub type ProposalId = ::core::primitive::u32; + } + impl ::subxt::ext::subxt_core::blocks::StaticExtrinsic for Execute { + const PALLET: &'static str = "Multisig"; + const CALL: &'static str = "execute"; + } + #[derive( + :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, + :: subxt :: ext :: subxt_core :: ext :: scale_encode :: EncodeAsType, + Debug, + )] + #[decode_as_type( + crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode" + )] + #[encode_as_type( + crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode" + )] + #[doc = "Approve dissolving a multisig account"] + #[doc = ""] + #[doc = "Signers call this to approve dissolving the multisig."] + 
#[doc = "When threshold is reached, the multisig is automatically dissolved."] #[doc = ""] #[doc = "Requirements:"] - #[doc = "- No proposals exist (active, executed, or cancelled) - must be fully cleaned up."] - #[doc = "- Multisig account balance must be zero."] - #[doc = "- Can be called by the creator OR any signer."] + #[doc = "- Caller must be a signer"] + #[doc = "- No proposals exist (active, executed, or cancelled) - must be fully cleaned up"] + #[doc = "- Multisig account balance must be zero"] #[doc = ""] - #[doc = "The deposit is ALWAYS returned to the original `creator` stored in `MultisigData`."] - pub struct DissolveMultisig { - pub multisig_address: dissolve_multisig::MultisigAddress, + #[doc = "When threshold is reached:"] + #[doc = "- Deposit is returned to creator"] + #[doc = "- Multisig storage is removed"] + pub struct ApproveDissolve { + pub multisig_address: approve_dissolve::MultisigAddress, } - pub mod dissolve_multisig { + pub mod approve_dissolve { use super::runtime_types; pub type MultisigAddress = ::subxt::ext::subxt_core::utils::AccountId32; } - impl ::subxt::ext::subxt_core::blocks::StaticExtrinsic for DissolveMultisig { + impl ::subxt::ext::subxt_core::blocks::StaticExtrinsic for ApproveDissolve { const PALLET: &'static str = "Multisig"; - const CALL: &'static str = "dissolve_multisig"; + const CALL: &'static str = "approve_dissolve"; } } pub struct TransactionApi; impl TransactionApi { - #[doc = "Create a new multisig account"] + #[doc = "Create a new multisig account with deterministic address"] #[doc = ""] #[doc = "Parameters:"] #[doc = "- `signers`: List of accounts that can sign for this multisig"] #[doc = "- `threshold`: Number of approvals required to execute transactions"] + #[doc = "- `nonce`: User-provided nonce for address uniqueness"] + #[doc = ""] + #[doc = "The multisig address is deterministically derived from:"] + #[doc = "hash(pallet_id || sorted_signers || threshold || nonce)"] + #[doc = ""] + #[doc = "Signers are 
automatically sorted before hashing, so order doesn't matter."] #[doc = ""] - #[doc = "The multisig address is derived from a hash of all signers + global nonce."] - #[doc = "The creator must pay a non-refundable fee (burned)."] + #[doc = "Economic costs:"] + #[doc = "- MultisigFee: burned immediately (spam prevention)"] + #[doc = "- MultisigDeposit: reserved until dissolution, then returned to creator (storage bond)"] pub fn create_multisig( &self, signers: types::create_multisig::Signers, threshold: types::create_multisig::Threshold, + nonce: types::create_multisig::Nonce, ) -> ::subxt::ext::subxt_core::tx::payload::StaticPayload { ::subxt::ext::subxt_core::tx::payload::StaticPayload::new_static( "Multisig", "create_multisig", - types::CreateMultisig { signers, threshold }, + types::CreateMultisig { signers, threshold, nonce }, [ - 245u8, 16u8, 24u8, 202u8, 226u8, 192u8, 244u8, 3u8, 112u8, 49u8, 179u8, - 60u8, 108u8, 206u8, 77u8, 130u8, 112u8, 243u8, 163u8, 11u8, 239u8, - 153u8, 232u8, 78u8, 27u8, 44u8, 88u8, 181u8, 65u8, 151u8, 98u8, 130u8, + 126u8, 145u8, 23u8, 129u8, 179u8, 174u8, 124u8, 92u8, 17u8, 77u8, 39u8, + 143u8, 138u8, 202u8, 71u8, 46u8, 71u8, 104u8, 68u8, 236u8, 223u8, + 128u8, 124u8, 89u8, 133u8, 103u8, 92u8, 150u8, 75u8, 49u8, 253u8, + 177u8, ], ) } @@ -20155,11 +20206,13 @@ pub mod api { #[doc = "- A deposit (refundable - returned immediately on execution/cancellation)"] #[doc = "- A fee (non-refundable, burned immediately)"] #[doc = ""] - #[doc = "**Auto-cleanup:** Before creating a new proposal, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers. This is the primary"] - #[doc = "cleanup mechanism."] + #[doc = "**Auto-cleanup:** Before creating a new proposal, ALL proposer's expired"] + #[doc = "proposals are automatically removed. 
This is the primary cleanup mechanism."] #[doc = ""] #[doc = "**For threshold=1:** If the multisig threshold is 1, the proposal executes immediately."] + #[doc = ""] + #[doc = "**Weight:** Charged upfront for worst-case (high-security path with decode)."] + #[doc = "Refunded to actual cost on success based on whether HS path was taken."] pub fn propose( &self, multisig_address: types::propose::MultisigAddress, @@ -20181,16 +20234,13 @@ pub mod api { #[doc = "Approve a proposed transaction"] #[doc = ""] #[doc = "If this approval brings the total approvals to or above the threshold,"] - #[doc = "the transaction will be automatically executed."] - #[doc = ""] - #[doc = "**Auto-cleanup:** Before processing the approval, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers."] + #[doc = "the proposal status changes to `Approved` and can be executed via `execute()`."] #[doc = ""] #[doc = "Parameters:"] #[doc = "- `multisig_address`: The multisig account"] #[doc = "- `proposal_id`: ID (nonce) of the proposal to approve"] #[doc = ""] - #[doc = "Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual"] + #[doc = "Weight: Charges for MAX call size, refunds based on actual"] pub fn approve( &self, multisig_address: types::approve::MultisigAddress, @@ -20209,14 +20259,9 @@ pub mod api { } #[doc = "Cancel a proposed transaction (only by proposer)"] #[doc = ""] - #[doc = "**Auto-cleanup:** Before processing the cancellation, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers."] - #[doc = ""] #[doc = "Parameters:"] #[doc = "- `multisig_address`: The multisig account"] #[doc = "- `proposal_id`: ID (nonce) of the proposal to cancel"] - #[doc = ""] - #[doc = "Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual"] pub fn cancel( &self, multisig_address: types::cancel::MultisigAddress, @@ -20284,28 +20329,62 @@ pub mod 
api { ], ) } - #[doc = "Dissolve (remove) a multisig and recover the creation deposit."] + #[doc = "Execute an approved proposal"] + #[doc = ""] + #[doc = "Can be called by any signer of the multisig once the proposal has reached"] + #[doc = "the approval threshold (status = Approved). The proposal must not be expired."] + #[doc = ""] + #[doc = "On execution:"] + #[doc = "- The call is decoded and dispatched as the multisig account"] + #[doc = "- Proposal is removed from storage"] + #[doc = "- Deposit is returned to the proposer"] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `multisig_address`: The multisig account"] + #[doc = "- `proposal_id`: ID (nonce) of the proposal to execute"] + pub fn execute( + &self, + multisig_address: types::execute::MultisigAddress, + proposal_id: types::execute::ProposalId, + ) -> ::subxt::ext::subxt_core::tx::payload::StaticPayload { + ::subxt::ext::subxt_core::tx::payload::StaticPayload::new_static( + "Multisig", + "execute", + types::Execute { multisig_address, proposal_id }, + [ + 209u8, 110u8, 225u8, 231u8, 188u8, 230u8, 192u8, 42u8, 43u8, 233u8, + 158u8, 149u8, 58u8, 203u8, 142u8, 44u8, 40u8, 27u8, 211u8, 194u8, 26u8, + 7u8, 7u8, 254u8, 29u8, 245u8, 230u8, 195u8, 82u8, 108u8, 1u8, 27u8, + ], + ) + } + #[doc = "Approve dissolving a multisig account"] + #[doc = ""] + #[doc = "Signers call this to approve dissolving the multisig."] + #[doc = "When threshold is reached, the multisig is automatically dissolved."] #[doc = ""] #[doc = "Requirements:"] - #[doc = "- No proposals exist (active, executed, or cancelled) - must be fully cleaned up."] - #[doc = "- Multisig account balance must be zero."] - #[doc = "- Can be called by the creator OR any signer."] + #[doc = "- Caller must be a signer"] + #[doc = "- No proposals exist (active, executed, or cancelled) - must be fully cleaned up"] + #[doc = "- Multisig account balance must be zero"] #[doc = ""] - #[doc = "The deposit is ALWAYS returned to the original `creator` stored in 
`MultisigData`."] - pub fn dissolve_multisig( + #[doc = "When threshold is reached:"] + #[doc = "- Deposit is returned to creator"] + #[doc = "- Multisig storage is removed"] + pub fn approve_dissolve( &self, - multisig_address: types::dissolve_multisig::MultisigAddress, - ) -> ::subxt::ext::subxt_core::tx::payload::StaticPayload + multisig_address: types::approve_dissolve::MultisigAddress, + ) -> ::subxt::ext::subxt_core::tx::payload::StaticPayload { ::subxt::ext::subxt_core::tx::payload::StaticPayload::new_static( "Multisig", - "dissolve_multisig", - types::DissolveMultisig { multisig_address }, + "approve_dissolve", + types::ApproveDissolve { multisig_address }, [ - 2u8, 71u8, 80u8, 125u8, 58u8, 244u8, 234u8, 154u8, 158u8, 71u8, 21u8, - 125u8, 188u8, 116u8, 1u8, 232u8, 43u8, 105u8, 162u8, 156u8, 132u8, - 57u8, 252u8, 187u8, 139u8, 106u8, 160u8, 157u8, 159u8, 64u8, 140u8, - 239u8, + 156u8, 98u8, 164u8, 184u8, 61u8, 224u8, 117u8, 109u8, 44u8, 173u8, + 59u8, 188u8, 164u8, 233u8, 191u8, 223u8, 240u8, 203u8, 164u8, 113u8, + 184u8, 187u8, 41u8, 154u8, 87u8, 135u8, 229u8, 56u8, 35u8, 196u8, + 136u8, 241u8, ], ) } @@ -20400,6 +20479,29 @@ pub mod api { )] #[decode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode")] #[encode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode")] + #[doc = "A proposal has reached threshold and is ready to execute"] + pub struct ProposalReadyToExecute { + pub multisig_address: proposal_ready_to_execute::MultisigAddress, + pub proposal_id: proposal_ready_to_execute::ProposalId, + pub approvals_count: proposal_ready_to_execute::ApprovalsCount, + } + pub mod proposal_ready_to_execute { + use super::runtime_types; + pub type MultisigAddress = ::subxt::ext::subxt_core::utils::AccountId32; + pub type ProposalId = ::core::primitive::u32; + pub type ApprovalsCount = ::core::primitive::u32; + } + impl ::subxt::ext::subxt_core::events::StaticEvent for ProposalReadyToExecute { + const PALLET: &'static 
str = "Multisig"; + const EVENT: &'static str = "ProposalReadyToExecute"; + } + #[derive( + :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, + :: subxt :: ext :: subxt_core :: ext :: scale_encode :: EncodeAsType, + Debug, + )] + #[decode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode")] + #[encode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode")] #[doc = "A proposal has been executed"] #[doc = "Contains all data needed for indexing by SubSquid"] pub struct ProposalExecuted { @@ -20508,17 +20610,42 @@ pub mod api { )] #[decode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode")] #[encode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode")] - #[doc = "A multisig account was dissolved and deposit returned"] + #[doc = "A signer approved dissolving the multisig"] + pub struct DissolveApproved { + pub multisig_address: dissolve_approved::MultisigAddress, + pub approver: dissolve_approved::Approver, + pub approvals_count: dissolve_approved::ApprovalsCount, + } + pub mod dissolve_approved { + use super::runtime_types; + pub type MultisigAddress = ::subxt::ext::subxt_core::utils::AccountId32; + pub type Approver = ::subxt::ext::subxt_core::utils::AccountId32; + pub type ApprovalsCount = ::core::primitive::u32; + } + impl ::subxt::ext::subxt_core::events::StaticEvent for DissolveApproved { + const PALLET: &'static str = "Multisig"; + const EVENT: &'static str = "DissolveApproved"; + } + #[derive( + :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, + :: subxt :: ext :: subxt_core :: ext :: scale_encode :: EncodeAsType, + Debug, + )] + #[decode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode")] + #[encode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode")] + #[doc = "A multisig account was dissolved (threshold reached)"] pub struct MultisigDissolved { pub multisig_address: 
multisig_dissolved::MultisigAddress, - pub caller: multisig_dissolved::Caller, pub deposit_returned: multisig_dissolved::DepositReturned, + pub approvers: multisig_dissolved::Approvers, } pub mod multisig_dissolved { use super::runtime_types; pub type MultisigAddress = ::subxt::ext::subxt_core::utils::AccountId32; - pub type Caller = ::subxt::ext::subxt_core::utils::AccountId32; - pub type DepositReturned = ::core::primitive::u128; + pub type DepositReturned = ::subxt::ext::subxt_core::utils::AccountId32; + pub type Approvers = ::subxt::ext::subxt_core::alloc::vec::Vec< + ::subxt::ext::subxt_core::utils::AccountId32, + >; } impl ::subxt::ext::subxt_core::events::StaticEvent for MultisigDissolved { const PALLET: &'static str = "Multisig"; @@ -20529,14 +20656,9 @@ pub mod api { use super::runtime_types; pub mod types { use super::runtime_types; - pub mod global_nonce { - use super::runtime_types; - pub type GlobalNonce = ::core::primitive::u64; - } pub mod multisigs { use super::runtime_types; pub type Multisigs = runtime_types::pallet_multisig::MultisigData< - ::core::primitive::u32, ::subxt::ext::subxt_core::utils::AccountId32, runtime_types::bounded_collections::bounded_vec::BoundedVec< ::subxt::ext::subxt_core::utils::AccountId32, @@ -20565,31 +20687,18 @@ pub mod api { pub type Param0 = ::subxt::ext::subxt_core::utils::AccountId32; pub type Param1 = ::core::primitive::u32; } + pub mod dissolve_approvals { + use super::runtime_types; + pub type DissolveApprovals = + runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::subxt::ext::subxt_core::utils::AccountId32, + >; + pub type Param0 = ::subxt::ext::subxt_core::utils::AccountId32; + } } pub struct StorageApi; impl StorageApi { - #[doc = " Global nonce for generating unique multisig addresses"] - pub fn global_nonce( - &self, - ) -> ::subxt::ext::subxt_core::storage::address::StaticAddress< - (), - types::global_nonce::GlobalNonce, - ::subxt::ext::subxt_core::utils::Yes, - 
::subxt::ext::subxt_core::utils::Yes, - (), - > { - ::subxt::ext::subxt_core::storage::address::StaticAddress::new_static( - "Multisig", - "GlobalNonce", - (), - [ - 119u8, 119u8, 84u8, 141u8, 83u8, 67u8, 42u8, 83u8, 51u8, 196u8, 185u8, - 39u8, 227u8, 125u8, 142u8, 154u8, 107u8, 62u8, 127u8, 13u8, 54u8, - 114u8, 201u8, 6u8, 100u8, 28u8, 202u8, 152u8, 246u8, 202u8, 9u8, 29u8, - ], - ) - } - #[doc = " Multisigs stored by their generated address"] + #[doc = " Multisigs stored by their deterministic address"] pub fn multisigs_iter( &self, ) -> ::subxt::ext::subxt_core::storage::address::StaticAddress< @@ -20604,13 +20713,13 @@ pub mod api { "Multisigs", (), [ - 95u8, 91u8, 215u8, 222u8, 132u8, 122u8, 22u8, 101u8, 197u8, 243u8, - 217u8, 12u8, 255u8, 230u8, 38u8, 200u8, 69u8, 179u8, 47u8, 227u8, 70u8, - 230u8, 25u8, 50u8, 224u8, 85u8, 127u8, 189u8, 241u8, 183u8, 21u8, 32u8, + 81u8, 182u8, 236u8, 127u8, 98u8, 244u8, 6u8, 51u8, 209u8, 6u8, 214u8, + 144u8, 49u8, 117u8, 203u8, 39u8, 180u8, 247u8, 172u8, 228u8, 72u8, + 25u8, 171u8, 55u8, 41u8, 236u8, 14u8, 135u8, 22u8, 6u8, 241u8, 230u8, ], ) } - #[doc = " Multisigs stored by their generated address"] + #[doc = " Multisigs stored by their deterministic address"] pub fn multisigs( &self, _0: types::multisigs::Param0, @@ -20628,9 +20737,9 @@ pub mod api { "Multisigs", ::subxt::ext::subxt_core::storage::address::StaticStorageKey::new(_0), [ - 95u8, 91u8, 215u8, 222u8, 132u8, 122u8, 22u8, 101u8, 197u8, 243u8, - 217u8, 12u8, 255u8, 230u8, 38u8, 200u8, 69u8, 179u8, 47u8, 227u8, 70u8, - 230u8, 25u8, 50u8, 224u8, 85u8, 127u8, 189u8, 241u8, 183u8, 21u8, 32u8, + 81u8, 182u8, 236u8, 127u8, 98u8, 244u8, 6u8, 51u8, 209u8, 6u8, 214u8, + 144u8, 49u8, 117u8, 203u8, 39u8, 180u8, 247u8, 172u8, 228u8, 72u8, + 25u8, 171u8, 55u8, 41u8, 236u8, 14u8, 135u8, 22u8, 6u8, 241u8, 230u8, ], ) } @@ -20649,9 +20758,10 @@ pub mod api { "Proposals", (), [ - 91u8, 232u8, 160u8, 102u8, 108u8, 104u8, 162u8, 6u8, 45u8, 30u8, 146u8, - 207u8, 18u8, 43u8, 197u8, 
37u8, 166u8, 99u8, 30u8, 17u8, 46u8, 210u8, - 56u8, 209u8, 9u8, 35u8, 221u8, 140u8, 178u8, 248u8, 249u8, 214u8, + 102u8, 10u8, 240u8, 43u8, 229u8, 237u8, 64u8, 243u8, 64u8, 7u8, 59u8, + 83u8, 229u8, 106u8, 209u8, 184u8, 240u8, 116u8, 205u8, 176u8, 4u8, + 247u8, 234u8, 87u8, 177u8, 197u8, 117u8, 38u8, 83u8, 216u8, 218u8, + 67u8, ], ) } @@ -20673,9 +20783,10 @@ pub mod api { "Proposals", ::subxt::ext::subxt_core::storage::address::StaticStorageKey::new(_0), [ - 91u8, 232u8, 160u8, 102u8, 108u8, 104u8, 162u8, 6u8, 45u8, 30u8, 146u8, - 207u8, 18u8, 43u8, 197u8, 37u8, 166u8, 99u8, 30u8, 17u8, 46u8, 210u8, - 56u8, 209u8, 9u8, 35u8, 221u8, 140u8, 178u8, 248u8, 249u8, 214u8, + 102u8, 10u8, 240u8, 43u8, 229u8, 237u8, 64u8, 243u8, 64u8, 7u8, 59u8, + 83u8, 229u8, 106u8, 209u8, 184u8, 240u8, 116u8, 205u8, 176u8, 4u8, + 247u8, 234u8, 87u8, 177u8, 197u8, 117u8, 38u8, 83u8, 216u8, 218u8, + 67u8, ], ) } @@ -20706,9 +20817,57 @@ pub mod api { ::subxt::ext::subxt_core::storage::address::StaticStorageKey::new(_1), ), [ - 91u8, 232u8, 160u8, 102u8, 108u8, 104u8, 162u8, 6u8, 45u8, 30u8, 146u8, - 207u8, 18u8, 43u8, 197u8, 37u8, 166u8, 99u8, 30u8, 17u8, 46u8, 210u8, - 56u8, 209u8, 9u8, 35u8, 221u8, 140u8, 178u8, 248u8, 249u8, 214u8, + 102u8, 10u8, 240u8, 43u8, 229u8, 237u8, 64u8, 243u8, 64u8, 7u8, 59u8, + 83u8, 229u8, 106u8, 209u8, 184u8, 240u8, 116u8, 205u8, 176u8, 4u8, + 247u8, 234u8, 87u8, 177u8, 197u8, 117u8, 38u8, 83u8, 216u8, 218u8, + 67u8, + ], + ) + } + #[doc = " Dissolve approvals: tracks which signers approved dissolving the multisig"] + #[doc = " Maps multisig_address -> Vec"] + pub fn dissolve_approvals_iter( + &self, + ) -> ::subxt::ext::subxt_core::storage::address::StaticAddress< + (), + types::dissolve_approvals::DissolveApprovals, + (), + (), + ::subxt::ext::subxt_core::utils::Yes, + > { + ::subxt::ext::subxt_core::storage::address::StaticAddress::new_static( + "Multisig", + "DissolveApprovals", + (), + [ + 204u8, 17u8, 210u8, 54u8, 125u8, 128u8, 75u8, 21u8, 158u8, 13u8, 
205u8, + 89u8, 98u8, 73u8, 141u8, 159u8, 53u8, 129u8, 19u8, 195u8, 2u8, 178u8, + 26u8, 137u8, 206u8, 7u8, 108u8, 196u8, 195u8, 4u8, 54u8, 111u8, + ], + ) + } + #[doc = " Dissolve approvals: tracks which signers approved dissolving the multisig"] + #[doc = " Maps multisig_address -> Vec"] + pub fn dissolve_approvals( + &self, + _0: types::dissolve_approvals::Param0, + ) -> ::subxt::ext::subxt_core::storage::address::StaticAddress< + ::subxt::ext::subxt_core::storage::address::StaticStorageKey< + types::dissolve_approvals::Param0, + >, + types::dissolve_approvals::DissolveApprovals, + ::subxt::ext::subxt_core::utils::Yes, + (), + (), + > { + ::subxt::ext::subxt_core::storage::address::StaticAddress::new_static( + "Multisig", + "DissolveApprovals", + ::subxt::ext::subxt_core::storage::address::StaticStorageKey::new(_0), + [ + 204u8, 17u8, 210u8, 54u8, 125u8, 128u8, 75u8, 21u8, 158u8, 13u8, 205u8, + 89u8, 98u8, 73u8, 141u8, 159u8, 53u8, 129u8, 19u8, 195u8, 2u8, 178u8, + 26u8, 137u8, 206u8, 7u8, 108u8, 196u8, 195u8, 4u8, 54u8, 111u8, ], ) } @@ -20917,89 +21076,6 @@ pub mod api { #[encode_as_type( crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode" )] - pub struct VerifyWormholeProof { - pub proof_bytes: verify_wormhole_proof::ProofBytes, - } - pub mod verify_wormhole_proof { - use super::runtime_types; - pub type ProofBytes = - ::subxt::ext::subxt_core::alloc::vec::Vec<::core::primitive::u8>; - } - impl ::subxt::ext::subxt_core::blocks::StaticExtrinsic for VerifyWormholeProof { - const PALLET: &'static str = "Wormhole"; - const CALL: &'static str = "verify_wormhole_proof"; - } - #[derive( - :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, - :: subxt :: ext :: subxt_core :: ext :: scale_encode :: EncodeAsType, - Debug, - )] - #[decode_as_type( - crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode" - )] - #[encode_as_type( - crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode" - )] - #[doc = "Transfer 
native tokens and store proof for wormhole"] - pub struct TransferNative { - pub dest: transfer_native::Dest, - #[codec(compact)] - pub amount: transfer_native::Amount, - } - pub mod transfer_native { - use super::runtime_types; - pub type Dest = ::subxt::ext::subxt_core::utils::MultiAddress< - ::subxt::ext::subxt_core::utils::AccountId32, - (), - >; - pub type Amount = ::core::primitive::u128; - } - impl ::subxt::ext::subxt_core::blocks::StaticExtrinsic for TransferNative { - const PALLET: &'static str = "Wormhole"; - const CALL: &'static str = "transfer_native"; - } - #[derive( - :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, - :: subxt :: ext :: subxt_core :: ext :: scale_encode :: EncodeAsType, - Debug, - )] - #[decode_as_type( - crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode" - )] - #[encode_as_type( - crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode" - )] - #[doc = "Transfer asset tokens and store proof for wormhole"] - pub struct TransferAsset { - pub asset_id: transfer_asset::AssetId, - pub dest: transfer_asset::Dest, - #[codec(compact)] - pub amount: transfer_asset::Amount, - } - pub mod transfer_asset { - use super::runtime_types; - pub type AssetId = ::core::primitive::u32; - pub type Dest = ::subxt::ext::subxt_core::utils::MultiAddress< - ::subxt::ext::subxt_core::utils::AccountId32, - (), - >; - pub type Amount = ::core::primitive::u128; - } - impl ::subxt::ext::subxt_core::blocks::StaticExtrinsic for TransferAsset { - const PALLET: &'static str = "Wormhole"; - const CALL: &'static str = "transfer_asset"; - } - #[derive( - :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, - :: subxt :: ext :: subxt_core :: ext :: scale_encode :: EncodeAsType, - Debug, - )] - #[decode_as_type( - crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode" - )] - #[encode_as_type( - crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode" - )] #[doc = "Verify an aggregated 
wormhole proof and process all transfers in the batch"] pub struct VerifyAggregatedProof { pub proof_bytes: verify_aggregated_proof::ProofBytes, @@ -21016,60 +21092,6 @@ pub mod api { } pub struct TransactionApi; impl TransactionApi { - pub fn verify_wormhole_proof( - &self, - proof_bytes: types::verify_wormhole_proof::ProofBytes, - ) -> ::subxt::ext::subxt_core::tx::payload::StaticPayload - { - ::subxt::ext::subxt_core::tx::payload::StaticPayload::new_static( - "Wormhole", - "verify_wormhole_proof", - types::VerifyWormholeProof { proof_bytes }, - [ - 242u8, 232u8, 238u8, 253u8, 96u8, 217u8, 86u8, 251u8, 216u8, 200u8, - 103u8, 7u8, 182u8, 218u8, 118u8, 149u8, 120u8, 244u8, 124u8, 33u8, - 133u8, 50u8, 150u8, 163u8, 187u8, 19u8, 37u8, 76u8, 73u8, 48u8, 213u8, - 193u8, - ], - ) - } - #[doc = "Transfer native tokens and store proof for wormhole"] - pub fn transfer_native( - &self, - dest: types::transfer_native::Dest, - amount: types::transfer_native::Amount, - ) -> ::subxt::ext::subxt_core::tx::payload::StaticPayload - { - ::subxt::ext::subxt_core::tx::payload::StaticPayload::new_static( - "Wormhole", - "transfer_native", - types::TransferNative { dest, amount }, - [ - 108u8, 65u8, 158u8, 226u8, 204u8, 38u8, 41u8, 193u8, 72u8, 15u8, 175u8, - 111u8, 213u8, 47u8, 70u8, 113u8, 235u8, 77u8, 160u8, 118u8, 210u8, - 134u8, 44u8, 76u8, 24u8, 231u8, 234u8, 50u8, 44u8, 75u8, 99u8, 215u8, - ], - ) - } - #[doc = "Transfer asset tokens and store proof for wormhole"] - pub fn transfer_asset( - &self, - asset_id: types::transfer_asset::AssetId, - dest: types::transfer_asset::Dest, - amount: types::transfer_asset::Amount, - ) -> ::subxt::ext::subxt_core::tx::payload::StaticPayload - { - ::subxt::ext::subxt_core::tx::payload::StaticPayload::new_static( - "Wormhole", - "transfer_asset", - types::TransferAsset { asset_id, dest, amount }, - [ - 49u8, 152u8, 37u8, 165u8, 177u8, 50u8, 177u8, 190u8, 98u8, 23u8, 130u8, - 61u8, 91u8, 175u8, 20u8, 208u8, 21u8, 95u8, 21u8, 10u8, 229u8, 132u8, - 
118u8, 155u8, 74u8, 212u8, 103u8, 247u8, 138u8, 49u8, 157u8, 214u8, - ], - ) - } #[doc = "Verify an aggregated wormhole proof and process all transfers in the batch"] pub fn verify_aggregated_proof( &self, @@ -21354,7 +21376,8 @@ pub mod api { ], ) } - #[doc = " Minimum transfer amount required for proof verification"] + #[doc = " Minimum transfer amount required for wormhole transfers."] + #[doc = " This prevents dust transfers that waste storage."] pub fn minimum_transfer_amount( &self, ) -> ::subxt::ext::subxt_core::constants::address::StaticAddress< @@ -24290,19 +24313,27 @@ pub mod api { #[doc = "Contains a variant per dispatchable extrinsic that this pallet has."] pub enum Call { #[codec(index = 0)] - #[doc = "Create a new multisig account"] + #[doc = "Create a new multisig account with deterministic address"] #[doc = ""] #[doc = "Parameters:"] #[doc = "- `signers`: List of accounts that can sign for this multisig"] #[doc = "- `threshold`: Number of approvals required to execute transactions"] + #[doc = "- `nonce`: User-provided nonce for address uniqueness"] + #[doc = ""] + #[doc = "The multisig address is deterministically derived from:"] + #[doc = "hash(pallet_id || sorted_signers || threshold || nonce)"] #[doc = ""] - #[doc = "The multisig address is derived from a hash of all signers + global nonce."] - #[doc = "The creator must pay a non-refundable fee (burned)."] + #[doc = "Signers are automatically sorted before hashing, so order doesn't matter."] + #[doc = ""] + #[doc = "Economic costs:"] + #[doc = "- MultisigFee: burned immediately (spam prevention)"] + #[doc = "- MultisigDeposit: reserved until dissolution, then returned to creator (storage bond)"] create_multisig { signers: ::subxt::ext::subxt_core::alloc::vec::Vec< ::subxt::ext::subxt_core::utils::AccountId32, >, threshold: ::core::primitive::u32, + nonce: ::core::primitive::u64, }, #[codec(index = 1)] #[doc = "Propose a transaction to be executed by the multisig"] @@ -24316,11 +24347,13 @@ pub 
mod api { #[doc = "- A deposit (refundable - returned immediately on execution/cancellation)"] #[doc = "- A fee (non-refundable, burned immediately)"] #[doc = ""] - #[doc = "**Auto-cleanup:** Before creating a new proposal, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers. This is the primary"] - #[doc = "cleanup mechanism."] + #[doc = "**Auto-cleanup:** Before creating a new proposal, ALL proposer's expired"] + #[doc = "proposals are automatically removed. This is the primary cleanup mechanism."] #[doc = ""] #[doc = "**For threshold=1:** If the multisig threshold is 1, the proposal executes immediately."] + #[doc = ""] + #[doc = "**Weight:** Charged upfront for worst-case (high-security path with decode)."] + #[doc = "Refunded to actual cost on success based on whether HS path was taken."] propose { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, call: ::subxt::ext::subxt_core::alloc::vec::Vec<::core::primitive::u8>, @@ -24330,16 +24363,13 @@ pub mod api { #[doc = "Approve a proposed transaction"] #[doc = ""] #[doc = "If this approval brings the total approvals to or above the threshold,"] - #[doc = "the transaction will be automatically executed."] - #[doc = ""] - #[doc = "**Auto-cleanup:** Before processing the approval, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers."] + #[doc = "the proposal status changes to `Approved` and can be executed via `execute()`."] #[doc = ""] #[doc = "Parameters:"] #[doc = "- `multisig_address`: The multisig account"] #[doc = "- `proposal_id`: ID (nonce) of the proposal to approve"] #[doc = ""] - #[doc = "Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual"] + #[doc = "Weight: Charges for MAX call size, refunds based on actual"] approve { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, proposal_id: ::core::primitive::u32, @@ -24347,14 +24377,9 @@ pub mod 
api { #[codec(index = 3)] #[doc = "Cancel a proposed transaction (only by proposer)"] #[doc = ""] - #[doc = "**Auto-cleanup:** Before processing the cancellation, ALL expired proposals are"] - #[doc = "automatically removed and deposits returned to original proposers."] - #[doc = ""] #[doc = "Parameters:"] #[doc = "- `multisig_address`: The multisig account"] #[doc = "- `proposal_id`: ID (nonce) of the proposal to cancel"] - #[doc = ""] - #[doc = "Weight: Charges for MAX call size and MAX expired proposals, refunds based on actual"] cancel { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, proposal_id: ::core::primitive::u32, @@ -24386,16 +24411,39 @@ pub mod api { claim_deposits { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, }, + #[codec(index = 7)] + #[doc = "Execute an approved proposal"] + #[doc = ""] + #[doc = "Can be called by any signer of the multisig once the proposal has reached"] + #[doc = "the approval threshold (status = Approved). The proposal must not be expired."] + #[doc = ""] + #[doc = "On execution:"] + #[doc = "- The call is decoded and dispatched as the multisig account"] + #[doc = "- Proposal is removed from storage"] + #[doc = "- Deposit is returned to the proposer"] + #[doc = ""] + #[doc = "Parameters:"] + #[doc = "- `multisig_address`: The multisig account"] + #[doc = "- `proposal_id`: ID (nonce) of the proposal to execute"] + execute { + multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, + proposal_id: ::core::primitive::u32, + }, #[codec(index = 6)] - #[doc = "Dissolve (remove) a multisig and recover the creation deposit."] + #[doc = "Approve dissolving a multisig account"] #[doc = ""] - #[doc = "Requirements:"] - #[doc = "- No proposals exist (active, executed, or cancelled) - must be fully cleaned up."] - #[doc = "- Multisig account balance must be zero."] - #[doc = "- Can be called by the creator OR any signer."] + #[doc = "Signers call this to approve dissolving the multisig."] + #[doc = 
"When threshold is reached, the multisig is automatically dissolved."] #[doc = ""] - #[doc = "The deposit is ALWAYS returned to the original `creator` stored in `MultisigData`."] - dissolve_multisig { + #[doc = "Requirements:"] + #[doc = "- Caller must be a signer"] + #[doc = "- No proposals exist (active, executed, or cancelled) - must be fully cleaned up"] + #[doc = "- Multisig account balance must be zero"] + #[doc = ""] + #[doc = "When threshold is reached:"] + #[doc = "- Deposit is returned to creator"] + #[doc = "- Multisig storage is removed"] + approve_dissolve { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, }, } @@ -24482,11 +24530,17 @@ pub mod api { #[doc = "Proposal is not active (already executed or cancelled)"] ProposalNotActive, #[codec(index = 23)] + #[doc = "Proposal has not been approved yet (threshold not reached)"] + ProposalNotApproved, + #[codec(index = 24)] #[doc = "Cannot dissolve multisig with existing proposals (clear them first)"] ProposalsExist, - #[codec(index = 24)] + #[codec(index = 25)] #[doc = "Multisig account must have zero balance before dissolution"] MultisigAccountNotZero, + #[codec(index = 26)] + #[doc = "Call is not allowed for high-security multisig"] + CallNotAllowedForHighSecurityMultisig, } #[derive( :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, @@ -24529,6 +24583,13 @@ pub mod api { approvals_count: ::core::primitive::u32, }, #[codec(index = 3)] + #[doc = "A proposal has reached threshold and is ready to execute"] + ProposalReadyToExecute { + multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, + proposal_id: ::core::primitive::u32, + approvals_count: ::core::primitive::u32, + }, + #[codec(index = 4)] #[doc = "A proposal has been executed"] #[doc = "Contains all data needed for indexing by SubSquid"] ProposalExecuted { @@ -24542,14 +24603,14 @@ pub mod api { result: ::core::result::Result<(), runtime_types::sp_runtime::DispatchError>, }, - #[codec(index = 4)] + 
#[codec(index = 5)] #[doc = "A proposal has been cancelled by the proposer"] ProposalCancelled { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, proposer: ::subxt::ext::subxt_core::utils::AccountId32, proposal_id: ::core::primitive::u32, }, - #[codec(index = 5)] + #[codec(index = 6)] #[doc = "Expired proposal was removed from storage"] ProposalRemoved { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, @@ -24557,7 +24618,7 @@ pub mod api { proposer: ::subxt::ext::subxt_core::utils::AccountId32, removed_by: ::subxt::ext::subxt_core::utils::AccountId32, }, - #[codec(index = 6)] + #[codec(index = 7)] #[doc = "Batch deposits claimed"] DepositsClaimed { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, @@ -24566,12 +24627,21 @@ pub mod api { proposals_removed: ::core::primitive::u32, multisig_removed: ::core::primitive::bool, }, - #[codec(index = 7)] - #[doc = "A multisig account was dissolved and deposit returned"] + #[codec(index = 8)] + #[doc = "A signer approved dissolving the multisig"] + DissolveApproved { + multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, + approver: ::subxt::ext::subxt_core::utils::AccountId32, + approvals_count: ::core::primitive::u32, + }, + #[codec(index = 9)] + #[doc = "A multisig account was dissolved (threshold reached)"] MultisigDissolved { multisig_address: ::subxt::ext::subxt_core::utils::AccountId32, - caller: ::subxt::ext::subxt_core::utils::AccountId32, - deposit_returned: ::core::primitive::u128, + deposit_returned: ::subxt::ext::subxt_core::utils::AccountId32, + approvers: ::subxt::ext::subxt_core::alloc::vec::Vec< + ::subxt::ext::subxt_core::utils::AccountId32, + >, }, } } @@ -24582,16 +24652,14 @@ pub mod api { )] #[decode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_decode")] #[encode_as_type(crate_path = ":: subxt :: ext :: subxt_core :: ext :: scale_encode")] - pub struct MultisigData<_0, _1, _2, _3, _4> { - pub signers: _2, + pub struct 
MultisigData<_0, _1, _2, _3> { + pub creator: _0, + pub signers: _1, pub threshold: ::core::primitive::u32, - pub nonce: ::core::primitive::u64, pub proposal_nonce: ::core::primitive::u32, - pub creator: _1, - pub deposit: _3, - pub last_activity: _0, + pub deposit: _2, pub active_proposals: ::core::primitive::u32, - pub proposals_per_signer: _4, + pub proposals_per_signer: _3, } #[derive( :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, @@ -24619,8 +24687,10 @@ pub mod api { #[codec(index = 0)] Active, #[codec(index = 1)] - Executed, + Approved, #[codec(index = 2)] + Executed, + #[codec(index = 3)] Cancelled, } } @@ -27507,33 +27577,7 @@ pub mod api { )] #[doc = "Contains a variant per dispatchable extrinsic that this pallet has."] pub enum Call { - #[codec(index = 0)] - verify_wormhole_proof { - proof_bytes: - ::subxt::ext::subxt_core::alloc::vec::Vec<::core::primitive::u8>, - }, - #[codec(index = 1)] - #[doc = "Transfer native tokens and store proof for wormhole"] - transfer_native { - dest: ::subxt::ext::subxt_core::utils::MultiAddress< - ::subxt::ext::subxt_core::utils::AccountId32, - (), - >, - #[codec(compact)] - amount: ::core::primitive::u128, - }, #[codec(index = 2)] - #[doc = "Transfer asset tokens and store proof for wormhole"] - transfer_asset { - asset_id: ::core::primitive::u32, - dest: ::subxt::ext::subxt_core::utils::MultiAddress< - ::subxt::ext::subxt_core::utils::AccountId32, - (), - >, - #[codec(compact)] - amount: ::core::primitive::u128, - }, - #[codec(index = 3)] #[doc = "Verify an aggregated wormhole proof and process all transfers in the batch"] verify_aggregated_proof { proof_bytes: @@ -27574,22 +27618,19 @@ pub mod api { #[codec(index = 9)] InvalidBlockNumber, #[codec(index = 10)] - AssetNotFound, - #[codec(index = 11)] - SelfTransfer, - #[codec(index = 12)] AggregatedVerifierNotAvailable, - #[codec(index = 13)] + #[codec(index = 11)] AggregatedProofDeserializationFailed, - #[codec(index = 14)] + #[codec(index = 12)] 
AggregatedVerificationFailed, - #[codec(index = 15)] + #[codec(index = 13)] InvalidAggregatedPublicInputs, - #[codec(index = 16)] - TransferAmountBelowMinimum, - #[codec(index = 17)] + #[codec(index = 14)] #[doc = "The volume fee rate in the proof doesn't match the configured rate"] InvalidVolumeFeeRate, + #[codec(index = 15)] + #[doc = "Transfer amount is below the minimum required"] + TransferAmountBelowMinimum, } #[derive( :: subxt :: ext :: subxt_core :: ext :: scale_decode :: DecodeAsType, diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 858ba27..5133ac4 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -282,6 +282,25 @@ pub enum Commands { pub enum DeveloperCommands { /// Create standard test wallets (crystal_alice, crystal_bob, crystal_charlie) CreateTestWallets, + + /// Build wormhole circuit binaries and copy to CLI and chain directories + BuildCircuits { + /// Path to qp-zk-circuits repository (default: ../qp-zk-circuits) + #[arg(long, default_value = "../qp-zk-circuits")] + circuits_path: String, + + /// Path to chain repository (default: ../chain) + #[arg(long, default_value = "../chain")] + chain_path: String, + + /// Number of leaf proofs aggregated into a single proof + #[arg(long)] + num_leaf_proofs: usize, + + /// Skip copying to chain directory + #[arg(long)] + skip_chain: bool, + }, } /// Execute a CLI command @@ -370,13 +389,7 @@ pub async fn execute_command( log_print!("💰 Balance: {}", formatted_balance); Ok(()) }, - Commands::Developer(dev_cmd) => match dev_cmd { - DeveloperCommands::CreateTestWallets => { - let _ = crate::cli::handle_developer_command(DeveloperCommands::CreateTestWallets) - .await; - Ok(()) - }, - }, + Commands::Developer(dev_cmd) => handle_developer_command(dev_cmd).await, Commands::Events { block, block_hash, latest: _, finalized, pallet, raw, no_decode } => events::handle_events_command( block, block_hash, finalized, pallet, raw, !no_decode, node_url, @@ -538,7 +551,159 @@ pub async fn handle_developer_command(command: 
DeveloperCommands) -> crate::erro Ok(()) }, + DeveloperCommands::BuildCircuits { + circuits_path, + chain_path, + num_leaf_proofs, + skip_chain, + } => build_wormhole_circuits(&circuits_path, &chain_path, num_leaf_proofs, skip_chain).await, + } +} + +/// Build wormhole circuit binaries and copy them to the appropriate locations +async fn build_wormhole_circuits( + circuits_path: &str, + chain_path: &str, + num_leaf_proofs: usize, + skip_chain: bool, +) -> crate::error::Result<()> { + use std::{path::Path, process::Command}; + + log_print!("Building ZK circuit binaries (num_leaf_proofs={})", num_leaf_proofs); + log_print!(""); + + let circuits_dir = Path::new(circuits_path); + let chain_dir = Path::new(chain_path); + + // Verify circuits directory exists + if !circuits_dir.exists() { + return Err(crate::error::QuantusError::Generic(format!( + "Circuits directory not found: {}", + circuits_path + ))); + } + + // Step 1: Build the circuit builder + log_print!("Step 1/4: Building circuit builder..."); + let build_output = Command::new("cargo") + .args(["build", "--release", "-p", "qp-wormhole-circuit-builder"]) + .current_dir(circuits_dir) + .output() + .map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to run cargo build: {}", e)) + })?; + + if !build_output.status.success() { + let stderr = String::from_utf8_lossy(&build_output.stderr); + return Err(crate::error::QuantusError::Generic(format!( + "Circuit builder compilation failed:\n{}", + stderr + ))); + } + log_success!(" Done"); + + // Step 2: Run the circuit builder to generate binaries + log_print!("Step 2/4: Generating circuit binaries (this may take a while)..."); + let builder_path = circuits_dir.join("target/release/qp-wormhole-circuit-builder"); + let run_output = Command::new(&builder_path) + .args(["--num-leaf-proofs", &num_leaf_proofs.to_string()]) + .current_dir(circuits_dir) + .output() + .map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to run circuit builder: {}", 
e)) + })?; + + if !run_output.status.success() { + let stderr = String::from_utf8_lossy(&run_output.stderr); + return Err(crate::error::QuantusError::Generic(format!( + "Circuit builder failed:\n{}", + stderr + ))); + } + log_success!(" Done"); + + // Step 3: Copy binaries to CLI and touch aggregator to force recompile + log_print!("Step 3/4: Copying binaries to CLI..."); + let source_bins = circuits_dir.join("generated-bins"); + let cli_bins = Path::new("generated-bins"); + + let cli_bin_files = [ + "common.bin", + "verifier.bin", + "prover.bin", + "dummy_proof.bin", + "aggregated_common.bin", + "aggregated_verifier.bin", + "config.json", + ]; + + for file in &cli_bin_files { + let src = source_bins.join(file); + let dst = cli_bins.join(file); + std::fs::copy(&src, &dst).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to copy {} to CLI: {}", file, e)) + })?; + log_verbose!(" Copied {}", file); + } + + // Touch aggregator lib.rs to force cargo to recompile it + let aggregator_lib = circuits_dir.join("wormhole/aggregator/src/lib.rs"); + if aggregator_lib.exists() { + if let Ok(file) = std::fs::OpenOptions::new().write(true).open(&aggregator_lib) { + let _ = file.set_modified(std::time::SystemTime::now()); + } } + log_success!(" Done"); + + // Step 4: Copy binaries to chain directory (if not skipped) + if !skip_chain { + log_print!("Step 4/4: Copying binaries to chain..."); + + if !chain_dir.exists() { + log_error!(" Chain directory not found: {}", chain_path); + log_print!(" Use --skip-chain to skip this step"); + } else { + let chain_bins = chain_dir.join("pallets/wormhole"); + + let chain_bin_files = + ["aggregated_common.bin", "aggregated_verifier.bin", "config.json"]; + + for file in &chain_bin_files { + let src = source_bins.join(file); + let dst = chain_bins.join(file); + std::fs::copy(&src, &dst).map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to copy {} to chain: {}", + file, e + )) + })?; + log_verbose!(" Copied 
{}", file); + } + + // Touch pallet lib.rs to force cargo to recompile it + let pallet_lib = chain_bins.join("src/lib.rs"); + if pallet_lib.exists() { + if let Ok(file) = std::fs::OpenOptions::new().write(true).open(&pallet_lib) { + let _ = file.set_modified(std::time::SystemTime::now()); + } + } + log_success!(" Done"); + } + } else { + log_print!("Step 4/4: Skipping chain copy (--skip-chain)"); + } + + log_print!(""); + log_success!("Circuit build complete!"); + log_print!(""); + if !skip_chain { + log_print!("{}", "Next steps:".bright_blue().bold()); + log_print!(" 1. Rebuild chain: cd {} && cargo build --release", chain_path); + log_print!(" 2. Restart the chain node"); + log_print!(""); + } + + Ok(()) } /// Handle compatibility check command diff --git a/src/cli/wormhole.rs b/src/cli/wormhole.rs index cd6feb3..4d198f8 100644 --- a/src/cli/wormhole.rs +++ b/src/cli/wormhole.rs @@ -3,30 +3,34 @@ use crate::{ client::{ChainConfig, QuantusClient}, quantus_subxt::{self as quantus_node, api::wormhole}, }, - cli::common::{submit_transaction, ExecutionMode}, - log_print, log_success, log_verbose, - wallet::QuantumKeyPair, + cli::{ + common::{submit_transaction, ExecutionMode}, + send::get_balance, + }, + log_error, log_print, log_success, log_verbose, + wallet::{password, QuantumKeyPair, WalletManager}, }; use clap::Subcommand; -use plonky2::plonk::{circuit_data::CircuitConfig, proof::ProofWithPublicInputs}; -use qp_poseidon::PoseidonHasher; +use indicatif::{ProgressBar, ProgressStyle}; +use plonky2::plonk::proof::ProofWithPublicInputs; +use qp_rusty_crystals_hdwallet::{ + derive_wormhole_from_mnemonic, generate_mnemonic, SensitiveBytes32, WormholePair, + QUANTUS_WORMHOLE_CHAIN_ID, +}; use qp_wormhole_circuit::{ - inputs::{ - AggregatedPublicCircuitInputs, CircuitInputs, PrivateCircuitInputs, PublicCircuitInputs, - }, + inputs::{CircuitInputs, ParseAggregatedPublicInputs, PrivateCircuitInputs}, nullifier::Nullifier, }; +use 
qp_wormhole_inputs::{AggregatedPublicCircuitInputs, PublicCircuitInputs}; use qp_wormhole_prover::WormholeProver; -use qp_wormhole_verifier::WormholeVerifier; use qp_zk_circuits_common::{ circuit::{C, D, F}, storage_proof::prepare_proof_for_circuit, - utils::{BytesDigest, Digest}, -}; -use sp_core::{ - crypto::{AccountId32, Ss58Codec}, - Hasher, + utils::{digest_felts_to_bytes, BytesDigest}, }; +use rand::RngCore; +use sp_core::crypto::{AccountId32, Ss58Codec}; +use std::path::Path; use subxt::{ backend::legacy::rpc_methods::ReadProof, blocks::Block, @@ -48,6 +52,95 @@ pub const SCALE_DOWN_FACTOR: u128 = 10_000_000_000; /// This must match the on-chain VolumeFeeRateBps configuration pub const VOLUME_FEE_BPS: u32 = 10; +/// SHA256 hashes of circuit binary files for integrity verification. +/// Must match the BinaryHashes struct in qp-wormhole-aggregator/src/config.rs +#[derive(Debug, Clone, serde::Deserialize, Default)] +pub struct BinaryHashes { + pub prover: Option, + pub aggregated_common: Option, + pub aggregated_verifier: Option, + pub dummy_proof: Option, +} + +/// Aggregation config loaded from generated-bins/config.json. +/// Must match the CircuitBinsConfig struct in qp-wormhole-aggregator/src/config.rs +#[derive(Debug, Clone, serde::Deserialize)] +pub struct AggregationConfig { + pub num_leaf_proofs: usize, + #[serde(default)] + pub hashes: Option, +} + +impl AggregationConfig { + /// Load config from the generated-bins directory + pub fn load_from_bins() -> crate::error::Result { + let config_path = Path::new("generated-bins/config.json"); + let config_str = std::fs::read_to_string(config_path).map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to read aggregation config from {}: {}. 
Run 'quantus developer build-circuits' first.", + config_path.display(), + e + )) + })?; + serde_json::from_str(&config_str).map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to parse aggregation config: {}", + e + )) + }) + } + + /// Verify that the binary files in generated-bins match the stored hashes. + pub fn verify_binary_hashes(&self) -> crate::error::Result<()> { + use sha2::{Digest, Sha256}; + + let Some(ref stored_hashes) = self.hashes else { + log_verbose!(" No hashes in config.json, skipping binary verification"); + return Ok(()); + }; + + let bins_dir = Path::new("generated-bins"); + let mut mismatches = Vec::new(); + + let hash_file = |filename: &str| -> Option { + let path = bins_dir.join(filename); + std::fs::read(&path).ok().map(|bytes| { + let hash = Sha256::digest(&bytes); + hex::encode(hash) + }) + }; + + let checks = [ + ("aggregated_common.bin", &stored_hashes.aggregated_common), + ("aggregated_verifier.bin", &stored_hashes.aggregated_verifier), + ("prover.bin", &stored_hashes.prover), + ("dummy_proof.bin", &stored_hashes.dummy_proof), + ]; + for (filename, expected_hash) in checks { + if let Some(ref expected) = expected_hash { + if let Some(actual) = hash_file(filename) { + if expected != &actual { + mismatches.push(format!("{}...", filename)); + } + } + } + } + + if mismatches.is_empty() { + log_verbose!(" Binary hashes verified successfully"); + Ok(()) + } else { + Err(crate::error::QuantusError::Generic(format!( + "Binary hash mismatch detected! 
The circuit binaries do not match config.json.\n\ + This can happen if binaries were regenerated but the CLI wasn't rebuilt.\n\ + Mismatches:\n {}\n\n\ + To fix: Run 'quantus developer build-circuits' and then 'cargo build --release'", + mismatches.join("\n ") + ))) + } + } +} + /// Compute output amount after fee deduction /// output = input * (10000 - fee_bps) / 10000 pub fn compute_output_amount(input_amount: u32, fee_bps: u32) -> u32 { @@ -111,75 +204,374 @@ pub fn write_proof_file(path: &str, proof_bytes: &[u8]) -> Result<(), String> { std::fs::write(path, proof_hex).map_err(|e| format!("Failed to write proof file: {}", e)) } -/// Validate aggregation parameters -pub fn validate_aggregation_params( - num_proofs: usize, - depth: usize, - branching_factor: usize, -) -> Result { - if num_proofs == 0 { - return Err("No proofs provided".to_string()); +/// Format a balance amount from raw units (12 decimals) to human-readable format +pub fn format_balance(amount: u128) -> String { + let whole = amount / 1_000_000_000_000; + let frac = (amount % 1_000_000_000_000) / 10_000_000_000; // 2 decimal places + format!("{}.{:02} DEV", whole, frac) +} + +/// Randomly partition a total amount into n parts. +/// Each part will be at least `min_per_part` and the sum equals `total`. +/// Returns amounts aligned to SCALE_DOWN_FACTOR for clean quantization. 
+pub fn random_partition(total: u128, n: usize, min_per_part: u128) -> Vec { + use rand::Rng; + + if n == 0 { + return vec![]; + } + if n == 1 { + return vec![total]; } - if branching_factor < 2 { - return Err("Branching factor must be at least 2".to_string()); + // Ensure minimum is achievable + let min_total = min_per_part * n as u128; + if total < min_total { + // Fall back to equal distribution if total is too small + let per_part = total / n as u128; + let remainder = total % n as u128; + let mut parts: Vec = vec![per_part; n]; + // Add remainder to last part + parts[n - 1] += remainder; + return parts; } - if depth == 0 { - return Err("Depth must be at least 1".to_string()); + // Amount available for random distribution after ensuring minimums + let distributable = total - min_total; + + // Generate n-1 random cut points in [0, distributable] + let mut rng = rand::rng(); + let mut cuts: Vec = (0..n - 1).map(|_| rng.random_range(0..=distributable)).collect(); + cuts.sort(); + + // Convert cuts to amounts + let mut parts = Vec::with_capacity(n); + let mut prev = 0u128; + for cut in cuts { + parts.push(min_per_part + (cut - prev)); + prev = cut; + } + parts.push(min_per_part + (distributable - prev)); + + // Note: Input 'total' is already in quantized units (e.g., 998 = 9.98 DEV). + // No further alignment is needed - just ensure the sum equals total. 
+ let sum: u128 = parts.iter().sum(); + let diff = total as i128 - sum as i128; + if diff != 0 { + // Add/subtract difference from a random part + let idx = rng.random_range(0..n); + parts[idx] = (parts[idx] as i128 + diff).max(0) as u128; } - // Calculate max leaf proofs for given depth and branching factor - let max_leaf_proofs = branching_factor.pow(depth as u32); + parts +} - if num_proofs > max_leaf_proofs { - return Err(format!( - "Too many proofs: {} provided, max {} for depth={} branching_factor={}", - num_proofs, max_leaf_proofs, depth, branching_factor - )); +/// Output assignment for a single proof (supports dual outputs) +#[derive(Debug, Clone)] +pub struct ProofOutputAssignment { + /// Amount for output 1 (quantized, 2 decimal places) + pub output_amount_1: u32, + /// Exit account for output 1 + pub exit_account_1: [u8; 32], + /// Amount for output 2 (quantized, 0 if unused) + pub output_amount_2: u32, + /// Exit account for output 2 (all zeros if unused) + pub exit_account_2: [u8; 32], +} + +/// Compute random output assignments for a set of proofs. +/// +/// This takes the input amounts for each proof and randomly distributes the outputs +/// across the target exit accounts. Each proof can have up to 2 outputs. +/// +/// # Algorithm: +/// 1. Compute total output amount (sum of inputs after fee deduction) +/// 2. Randomly partition total output across all target addresses +/// 3. 
Greedily assign outputs to proofs, using dual outputs when necessary +/// +/// # Arguments +/// * `input_amounts` - The input amount for each proof (in planck, before fee) +/// * `target_accounts` - The exit accounts to distribute outputs to +/// * `fee_bps` - Fee in basis points +/// +/// # Returns +/// A vector of output assignments, one per proof +pub fn compute_random_output_assignments( + input_amounts: &[u128], + target_accounts: &[[u8; 32]], + fee_bps: u32, +) -> Vec { + use rand::seq::SliceRandom; + + let num_proofs = input_amounts.len(); + let num_targets = target_accounts.len(); + + if num_proofs == 0 || num_targets == 0 { + return vec![]; + } + + // Step 1: Compute output amounts per proof (after fee deduction) + let proof_outputs: Vec = input_amounts + .iter() + .map(|&input| { + let input_quantized = quantize_funding_amount(input).unwrap_or(0); + compute_output_amount(input_quantized, fee_bps) + }) + .collect(); + + let total_output: u64 = proof_outputs.iter().map(|&x| x as u64).sum(); + + // Step 2: Randomly partition total output across target accounts + // Minimum 1 quantized unit (0.01 DEV) per target to ensure all targets receive funds + let min_per_target = 1u128; + let target_amounts_u128 = random_partition(total_output as u128, num_targets, min_per_target); + let target_amounts: Vec = target_amounts_u128.iter().map(|&x| x as u32).collect(); + + // Step 3: Assign outputs to proofs. + // Each proof can have at most 2 outputs to different targets. + // + // Strategy: + // Pass 1 - Guarantee every target gets at least one output slot by round-robin + // assigning each target as output_1 of successive proofs. + // Pass 2 - Fill remaining capacity (output_2 slots and any leftover amounts) + // greedily from targets that still have remaining allocation. 
+ // + // This ensures every target address appears in at least one proof output, + // which is critical for the multiround flow where each target is a next-round + // wormhole address that must receive minted tokens. + + let mut rng = rand::rng(); + + // Track remaining needs per target + let mut target_remaining: Vec = target_amounts.clone(); + + // Pre-allocate assignments with output_1 = full proof output, output_2 = 0 + let mut assignments: Vec = proof_outputs + .iter() + .map(|&po| ProofOutputAssignment { + output_amount_1: po, + exit_account_1: [0u8; 32], + output_amount_2: 0, + exit_account_2: [0u8; 32], + }) + .collect(); + + // Pass 1: Round-robin assign each target to a proof's output_1. + // If num_targets <= num_proofs, each target gets its own proof. + // If num_targets > num_proofs, later targets share proofs via output_2. + let mut shuffled_targets: Vec = (0..num_targets).collect(); + shuffled_targets.shuffle(&mut rng); + + for (assign_idx, &tidx) in shuffled_targets.iter().enumerate() { + let proof_idx = assign_idx % num_proofs; + let assignment = &mut assignments[proof_idx]; + + if assignment.exit_account_1 == [0u8; 32] { + // First target for this proof -> use output_1 + let assign = assignment.output_amount_1.min(target_remaining[tidx]); + assignment.exit_account_1 = target_accounts[tidx]; + // We'll fix up the exact amounts in pass 2; for now just mark the account + assignment.output_amount_1 = assign; + target_remaining[tidx] -= assign; + } else if assignment.exit_account_2 == [0u8; 32] { + // Second target for this proof -> use output_2 + let avail = proof_outputs[proof_idx].saturating_sub(assignment.output_amount_1); + let assign = avail.min(target_remaining[tidx]); + assignment.exit_account_2 = target_accounts[tidx]; + assignment.output_amount_2 = assign; + target_remaining[tidx] -= assign; + } + // If both slots taken, skip (shouldn't happen when num_targets <= 2*num_proofs) + } + + // Pass 2: Distribute any remaining target allocations 
into available proof outputs. + // Also ensure each proof's output_1 + output_2 == proof_outputs[i]. + for proof_idx in 0..num_proofs { + let total_proof_output = proof_outputs[proof_idx]; + let current_sum = + assignments[proof_idx].output_amount_1 + assignments[proof_idx].output_amount_2; + let mut shortfall = total_proof_output.saturating_sub(current_sum); + + if shortfall > 0 { + // Add shortfall to output_1 (its account is already set) + assignments[proof_idx].output_amount_1 += shortfall; + shortfall = 0; + } + + // If output_1_account is still [0;32] (shouldn't happen), assign first target as fallback + if assignments[proof_idx].exit_account_1 == [0u8; 32] && num_targets > 0 { + assignments[proof_idx].exit_account_1 = target_accounts[0]; + } + + let _ = shortfall; // suppress unused warning + } + + assignments +} + +/// Result of checking proof verification events +pub struct VerificationResult { + pub success: bool, + pub exit_amount: Option, + pub error_message: Option, +} + +/// Check for proof verification events in a transaction +/// Returns whether ProofVerified event was found and the exit amount +async fn check_proof_verification_events( + client: &subxt::OnlineClient, + block_hash: &subxt::utils::H256, + tx_hash: &subxt::utils::H256, + verbose: bool, +) -> crate::error::Result { + use crate::chain::quantus_subxt::api::system::events::ExtrinsicFailed; + use colored::Colorize; + + let block = client.blocks().at(*block_hash).await.map_err(|e| { + crate::error::QuantusError::NetworkError(format!("Failed to get block: {e:?}")) + })?; + + let extrinsics = block.extrinsics().await.map_err(|e| { + crate::error::QuantusError::NetworkError(format!("Failed to get extrinsics: {e:?}")) + })?; + + // Find our extrinsic index + let our_extrinsic_index = extrinsics + .iter() + .enumerate() + .find(|(_, ext)| ext.hash() == *tx_hash) + .map(|(idx, _)| idx); + + let events = block.events().await.map_err(|e| { + crate::error::QuantusError::NetworkError(format!("Failed 
to fetch events: {e:?}")) + })?; + + let metadata = client.metadata(); + + let mut verification_result = + VerificationResult { success: false, exit_amount: None, error_message: None }; + + if verbose { + log_print!(""); + log_print!("📋 Transaction Events:"); + } + + if let Some(ext_idx) = our_extrinsic_index { + for event_result in events.iter() { + let event = event_result.map_err(|e| { + crate::error::QuantusError::NetworkError(format!("Failed to decode event: {e:?}")) + })?; + + // Only process events for our extrinsic + if let subxt::events::Phase::ApplyExtrinsic(event_ext_idx) = event.phase() { + if event_ext_idx != ext_idx as u32 { + continue; + } + + // Display event in verbose mode + if verbose { + log_print!( + " 📌 {}.{}", + event.pallet_name().bright_cyan(), + event.variant_name().bright_yellow() + ); + + // Try to decode and display event details + if let Ok(typed_event) = + event.as_root_event::() + { + log_print!(" 📝 {:?}", typed_event); + } + } + + // Check for ProofVerified event + if let Ok(Some(proof_verified)) = + event.as_event::() + { + verification_result.success = true; + verification_result.exit_amount = Some(proof_verified.exit_amount); + } + + // Check for ExtrinsicFailed event + if let Ok(Some(ExtrinsicFailed { dispatch_error, .. 
})) = + event.as_event::() + { + let error_msg = format_dispatch_error(&dispatch_error, &metadata); + verification_result.success = false; + verification_result.error_message = Some(error_msg); + } + } + } + } + + if verbose { + log_print!(""); } - Ok(max_leaf_proofs) + Ok(verification_result) +} + +/// Format dispatch error for display +fn format_dispatch_error( + error: &crate::chain::quantus_subxt::api::runtime_types::sp_runtime::DispatchError, + metadata: &subxt::Metadata, +) -> String { + use crate::chain::quantus_subxt::api::runtime_types::sp_runtime::DispatchError; + + match error { + DispatchError::Module(module_error) => { + let pallet_name = metadata + .pallet_by_index(module_error.index) + .map(|p| p.name()) + .unwrap_or("Unknown"); + let error_index = module_error.error[0]; + format!("{}::Error[{}]", pallet_name, error_index) + }, + DispatchError::BadOrigin => "BadOrigin".to_string(), + DispatchError::CannotLookup => "CannotLookup".to_string(), + DispatchError::Other => "Other".to_string(), + _ => format!("{:?}", error), + } } #[derive(Subcommand, Debug)] pub enum WormholeCommands { - /// Generate a wormhole proof - Generate { - /// Secret (32-byte hex string) + /// Derive the unspendable wormhole address from a secret + Address { + /// Secret (32-byte hex string) - used to derive the unspendable account + #[arg(long)] + secret: String, + }, + /// Generate a wormhole proof from an existing transfer + Prove { + /// Secret (32-byte hex string) used for the transfer #[arg(long)] secret: String, - /// Funding amount to transfer + /// Funding amount that was transferred #[arg(long)] amount: u128, - /// Exit account (where funds will be withdrawn) + /// Exit account (where funds will be withdrawn, hex or SS58) #[arg(long)] exit_account: String, - /// Wallet name to fund from - #[arg(short, long)] - from: String, + /// Block hash to generate proof against (hex) + #[arg(long)] + block: String, - /// Password for the wallet - #[arg(short, long)] - password: 
Option, + /// Transfer count from the transfer event + #[arg(long)] + transfer_count: u64, - /// Read password from file + /// Funding account (sender of transfer, hex or SS58) #[arg(long)] - password_file: Option, + funding_account: String, /// Output file for the proof (default: proof.hex) #[arg(short, long, default_value = "proof.hex")] output: String, }, - /// Verify a single wormhole proof on-chain - Verify { - /// Path to the proof file (hex-encoded) - #[arg(short, long, default_value = "proof.hex")] - proof: String, - }, /// Aggregate multiple wormhole proofs into a single proof Aggregate { /// Input proof files (hex-encoded) @@ -189,14 +581,6 @@ pub enum WormholeCommands { /// Output file for the aggregated proof (default: aggregated_proof.hex) #[arg(short, long, default_value = "aggregated_proof.hex")] output: String, - - /// Tree depth for aggregation (default: 1) - #[arg(long, default_value = "1")] - depth: usize, - - /// Branching factor for aggregation tree (default: 2) - #[arg(long, default_value = "2")] - branching_factor: usize, }, /// Verify an aggregated wormhole proof on-chain VerifyAggregated { @@ -204,6 +588,58 @@ pub enum WormholeCommands { #[arg(short, long, default_value = "aggregated_proof.hex")] proof: String, }, + /// Parse and display the contents of a proof file (for debugging) + ParseProof { + /// Path to the proof file (hex-encoded) + #[arg(short, long)] + proof: String, + + /// Parse as aggregated proof (default: false, parses as leaf proof) + #[arg(long)] + aggregated: bool, + + /// Verify the proof cryptographically (local verification, not on-chain) + #[arg(long)] + verify: bool, + }, + /// Run a multi-round wormhole test: wallet -> wormhole -> ... 
-> wallet + Multiround { + /// Number of proofs per round (default: 2, max: 8) + #[arg(short, long, default_value = "2")] + num_proofs: usize, + + /// Number of rounds (default: 2) + #[arg(short, long, default_value = "2")] + rounds: usize, + + /// Total amount in planck to partition across all proofs (default: 100 DEV) + #[arg(short, long, default_value = "100000000000000")] + amount: u128, + + /// Wallet name to use for funding and final exit + #[arg(short, long)] + wallet: String, + + /// Password for the wallet + #[arg(short, long)] + password: Option, + + /// Read password from file + #[arg(long)] + password_file: Option, + + /// Keep proof files after completion + #[arg(short, long)] + keep_files: bool, + + /// Output directory for proof files + #[arg(short, long, default_value = "/tmp/wormhole_multiround")] + output_dir: String, + + /// Dry run - show what would be done without executing + #[arg(long)] + dry_run: bool, + }, } pub async fn handle_wormhole_command( @@ -211,74 +647,100 @@ pub async fn handle_wormhole_command( node_url: &str, ) -> crate::error::Result<()> { match command { - WormholeCommands::Generate { + WormholeCommands::Address { secret } => show_wormhole_address(secret), + WormholeCommands::Prove { secret, amount, exit_account, - from, + block, + transfer_count, + funding_account, + output, + } => { + log_print!("Generating proof from existing transfer..."); + + // Connect to node + let quantus_client = QuantusClient::new(node_url).await.map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to connect: {}", e)) + })?; + + // Parse exit account + let exit_account_bytes = + parse_exit_account(&exit_account).map_err(crate::error::QuantusError::Generic)?; + + // Quantize amount and compute output (single output, no change) + let input_amount_quantized = + quantize_funding_amount(amount).map_err(crate::error::QuantusError::Generic)?; + let output_amount = compute_output_amount(input_amount_quantized, VOLUME_FEE_BPS); + + let 
output_assignment = ProofOutputAssignment { + output_amount_1: output_amount, + exit_account_1: exit_account_bytes, + output_amount_2: 0, + exit_account_2: [0u8; 32], + }; + + let prove_start = std::time::Instant::now(); + generate_proof( + &secret, + amount, + &output_assignment, + &block, + transfer_count, + &funding_account, + &output, + &quantus_client, + ) + .await?; + let prove_elapsed = prove_start.elapsed(); + log_print!("Proof generation: {:.2}s", prove_elapsed.as_secs_f64()); + Ok(()) + }, + WormholeCommands::Aggregate { proofs, output } => aggregate_proofs(proofs, output).await, + WormholeCommands::VerifyAggregated { proof } => + verify_aggregated_proof(proof, node_url).await, + WormholeCommands::ParseProof { proof, aggregated, verify } => + parse_proof_file(proof, aggregated, verify).await, + WormholeCommands::Multiround { + num_proofs, + rounds, + amount, + wallet, password, password_file, - output, + keep_files, + output_dir, + dry_run, } => - generate_proof( - secret, + run_multiround( + num_proofs, + rounds, amount, - exit_account, - from, + wallet, password, password_file, - output, + keep_files, + output_dir, + dry_run, node_url, ) .await, - WormholeCommands::Verify { proof } => verify_proof(proof, node_url).await, - WormholeCommands::Aggregate { proofs, output, depth, branching_factor } => - aggregate_proofs(proofs, output, depth, branching_factor).await, - WormholeCommands::VerifyAggregated { proof } => - verify_aggregated_proof(proof, node_url).await, } } pub type TransferProofKey = (u32, u64, AccountId32, AccountId32, u128); -async fn generate_proof( - secret_hex: String, - funding_amount: u128, - exit_account_str: String, - from_wallet: String, - password: Option, - password_file: Option, - output_file: String, - node_url: &str, -) -> crate::error::Result<()> { - log_print!("Generating wormhole proof..."); +/// Derive and display the unspendable wormhole address from a secret. 
+/// Users can then send funds to this address using `quantus send`. +fn show_wormhole_address(secret_hex: String) -> crate::error::Result<()> { + use colored::Colorize; - // Parse secret using helper function let secret_array = parse_secret_hex(&secret_hex).map_err(crate::error::QuantusError::Generic)?; let secret: BytesDigest = secret_array.try_into().map_err(|e| { crate::error::QuantusError::Generic(format!("Failed to convert secret: {:?}", e)) })?; - // Parse exit account using helper function - let exit_account_bytes = - parse_exit_account(&exit_account_str).map_err(crate::error::QuantusError::Generic)?; - let exit_account_id = SubxtAccountId(exit_account_bytes); - - // Load keypair - let keypair = crate::wallet::load_keypair_from_wallet(&from_wallet, password, password_file)?; - - // Connect to node - let quantus_client = QuantusClient::new(node_url) - .await - .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to connect: {}", e)))?; - let client = quantus_client.client(); - - log_verbose!("Connected to node"); - - let funding_account = AccountId32::new(PoseidonHasher::hash(keypair.public_key.as_ref()).0); - - // Generate unspendable account let unspendable_account = qp_wormhole_circuit::unspendable_account::UnspendableAccount::from_secret(secret) .account_id; @@ -288,443 +750,1568 @@ async fn generate_proof( .as_ref() .try_into() .expect("BytesDigest is always 32 bytes"); - let unspendable_account_id = SubxtAccountId(unspendable_account_bytes); - log_verbose!("Unspendable account: {:?}", &unspendable_account_id); - log_verbose!("Exit account: {:?}", &exit_account_id); + let account_id = sp_core::crypto::AccountId32::new(unspendable_account_bytes); + let ss58_address = + account_id.to_ss58check_with_version(sp_core::crypto::Ss58AddressFormat::custom(189)); - // Transfer to unspendable account using wormhole pallet - let transfer_tx = quantus_node::api::tx().wormhole().transfer_native( - 
subxt::ext::subxt_core::utils::MultiAddress::Id(unspendable_account_id.clone()), - funding_amount, - ); + log_print!("{}", "Wormhole Address".bright_cyan()); + log_print!(" SS58: {}", ss58_address.bright_green()); + log_print!(" Hex: 0x{}", hex::encode(unspendable_account_bytes)); + log_print!(""); + log_print!("To fund this address:"); + log_print!(" quantus send --from --to {} --amount ", ss58_address); - log_verbose!("Submitting transfer to unspendable account..."); + Ok(()) +} - let quantum_keypair = QuantumKeyPair { - public_key: keypair.public_key.clone(), - private_key: keypair.private_key.clone(), - }; +async fn at_best_block( + quantus_client: &QuantusClient, +) -> anyhow::Result>> { + let best_block = quantus_client.get_latest_block().await?; + let block = quantus_client.client().blocks().at(best_block).await?; + Ok(block) +} - submit_transaction( - &quantus_client, - &quantum_keypair, - transfer_tx, - None, - ExecutionMode { finalized: false, wait_for_transaction: true }, - ) - .await - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; +async fn aggregate_proofs( + proof_files: Vec, + output_file: String, +) -> crate::error::Result<()> { + use qp_wormhole_aggregator::aggregator::WormholeProofAggregator; + use qp_zk_circuits_common::aggregation::AggregationConfig as AggConfig; - let blocks = at_best_block(&quantus_client) - .await - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; - let block_hash = blocks.hash(); + use std::path::Path; - log_success!("Transfer included in block: {:?}", block_hash); + log_print!("Aggregating {} proofs...", proof_files.len()); - let events_api = client - .events() - .at(block_hash) - .await - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + // Load config first to validate and calculate padding needs + let bins_dir = Path::new("generated-bins"); + let agg_config = AggregationConfig::load_from_bins()?; + + // Verify binary hashes match config.json to detect stale 
binaries + log_verbose!("Verifying circuit binary integrity..."); + agg_config.verify_binary_hashes()?; + + // Validate number of proofs before doing expensive work + if proof_files.len() > agg_config.num_leaf_proofs { + return Err(crate::error::QuantusError::Generic(format!( + "Too many proofs: {} provided, max {} supported by circuit", + proof_files.len(), + agg_config.num_leaf_proofs + ))); + } - let event = events_api - .find::() - .next() - .ok_or_else(|| { - crate::error::QuantusError::Generic("No NativeTransferred event found".to_string()) - })? - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + let num_padding_proofs = agg_config.num_leaf_proofs - proof_files.len(); - log_verbose!( - "Transfer event: amount={}, transfer_count={}", - event.amount, - event.transfer_count - ); + // Create progress bar for padding proof generation (if needed) + // Load aggregator from pre-built bins (reads config from config.json) + // This also generates the padding (dummy) proofs needed + log_print!(" Loading aggregator and generating {} dummy proofs...", num_padding_proofs); - // Get storage proof - let storage_api = client.storage().at(block_hash); + let aggr_config = AggConfig::new(agg_config.num_leaf_proofs); + let mut aggregator = WormholeProofAggregator::from_prebuilt_dir(bins_dir, aggr_config) + .map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to load aggregator from pre-built bins: {}", + e + )) + })?; - // Convert subxt AccountId32 to sp_core AccountId32 for hash_storage - let from_account = AccountId32::new(event.from.0); - let to_account = AccountId32::new(event.to.0); + log_verbose!("Aggregation config: num_leaf_proofs={}", aggregator.config.num_leaf_proofs); + let common_data = aggregator.leaf_circuit_data.common.clone(); - let leaf_hash = qp_poseidon::PoseidonHasher::hash_storage::( - &( - NATIVE_ASSET_ID, - event.transfer_count, - from_account.clone(), - to_account.clone(), - event.amount, - ) - .encode(), - ); - let 
proof_address = quantus_node::api::storage().wormhole().transfer_proof(( - NATIVE_ASSET_ID, - event.transfer_count, - event.from.clone(), - event.to.clone(), - event.amount, - )); - let mut final_key = proof_address.to_root_bytes(); - final_key.extend_from_slice(&leaf_hash); - let val = storage_api - .fetch_raw(final_key.clone()) - .await - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; - if val.is_none() { - return Err(crate::error::QuantusError::Generic("Storage key not found".to_string())); - } + // Load and add proofs using helper function + for (idx, proof_file) in proof_files.iter().enumerate() { + log_verbose!("Loading proof {}/{}: {}", idx + 1, proof_files.len(), proof_file); - let proof_params = rpc_params![vec![to_hex(&final_key)], block_hash]; - let read_proof: ReadProof = quantus_client - .rpc_client() - .request("state_getReadProof", proof_params) - .await - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + let proof_bytes = read_proof_file(proof_file).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to load {}: {}", proof_file, e)) + })?; - let header = blocks.header(); + let proof = ProofWithPublicInputs::::from_bytes(proof_bytes, &common_data) + .map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to deserialize proof from {}: {}", + proof_file, e + )) + })?; - let state_root = BytesDigest::try_from(header.state_root.as_bytes()) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; - let parent_hash = BytesDigest::try_from(header.parent_hash.as_bytes()) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; - let extrinsics_root = BytesDigest::try_from(header.extrinsics_root.as_bytes()) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; - let digest = - header.digest.encode().try_into().map_err(|_| { - crate::error::QuantusError::Generic("Failed to encode digest".to_string()) + aggregator.push_proof(proof).map_err(|e| { + 
crate::error::QuantusError::Generic(format!("Failed to add proof: {}", e)) })?; + } - let block_number = header.number; + log_print!(" Running aggregation..."); + let agg_start = std::time::Instant::now(); + let aggregated_proof = aggregator + .aggregate() + .map_err(|e| crate::error::QuantusError::Generic(format!("Aggregation failed: {}", e)))?; + let agg_elapsed = agg_start.elapsed(); + log_print!(" Aggregation: {:.2}s", agg_elapsed.as_secs_f64()); - // Prepare storage proof - let processed_storage_proof = prepare_proof_for_circuit( - read_proof.proof.iter().map(|proof| proof.0.clone()).collect(), - hex::encode(header.state_root.0), - leaf_hash, + // Parse and display aggregated public inputs + let aggregated_public_inputs = AggregatedPublicCircuitInputs::try_from_felts( + aggregated_proof.proof.public_inputs.as_slice(), ) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; - - // Quantize the funding amount using helper function - let input_amount_quantized: u32 = - quantize_funding_amount(funding_amount).map_err(crate::error::QuantusError::Generic)?; - - // Calculate output amount after fee deduction - let output_amount_quantized = compute_output_amount(input_amount_quantized, VOLUME_FEE_BPS); + .map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to parse aggregated public inputs: {}", + e + )) + })?; - let inputs = CircuitInputs { - private: PrivateCircuitInputs { - secret, - transfer_count: event.transfer_count, - funding_account: BytesDigest::try_from(funding_account.as_ref() as &[u8]) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?, - storage_proof: processed_storage_proof, - unspendable_account: Digest::from(unspendable_account).into(), - state_root, - extrinsics_root, - digest, - input_amount: input_amount_quantized, - }, - public: PublicCircuitInputs { - output_amount: output_amount_quantized, - volume_fee_bps: VOLUME_FEE_BPS, - nullifier: Nullifier::from_preimage(secret, 
event.transfer_count).hash.into(), - exit_account: BytesDigest::try_from(exit_account_id.as_ref() as &[u8]) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?, - block_hash: BytesDigest::try_from(block_hash.as_ref()) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?, - parent_hash, - block_number, - asset_id: NATIVE_ASSET_ID, - }, - }; + log_verbose!("Aggregated public inputs: {:#?}", aggregated_public_inputs); - log_verbose!("Generating ZK proof..."); - let config = CircuitConfig::standard_recursion_zk_config(); - let prover = WormholeProver::new(config); - let prover_next = prover - .commit(&inputs) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; - let proof: ProofWithPublicInputs<_, _, 2> = prover_next.prove().map_err(|e| { - crate::error::QuantusError::Generic(format!("Proof generation failed: {}", e)) - })?; + // Log exit accounts and amounts that will be minted + log_print!(" Exit accounts in aggregated proof:"); + for (idx, account_data) in aggregated_public_inputs.account_data.iter().enumerate() { + let exit_bytes: &[u8] = account_data.exit_account.as_ref(); + let is_dummy = exit_bytes.iter().all(|&b| b == 0) || account_data.summed_output_amount == 0; + if is_dummy { + log_verbose!(" [{}] DUMMY (skipped)", idx); + } else { + // De-quantize to show actual amount that will be minted + let dequantized_amount = + (account_data.summed_output_amount as u128) * SCALE_DOWN_FACTOR; + log_print!( + " [{}] {} -> {} quantized ({} planck = {})", + idx, + hex::encode(exit_bytes), + account_data.summed_output_amount, + dequantized_amount, + format_balance(dequantized_amount) + ); + } + } - let public_inputs = PublicCircuitInputs::try_from(&proof) - .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + // Verify the aggregated proof locally + log_verbose!("Verifying aggregated proof locally..."); + aggregated_proof + .circuit_data + .verify(aggregated_proof.proof.clone()) + .map_err(|e| { + 
crate::error::QuantusError::Generic(format!( + "Aggregated proof verification failed: {}", + e + )) + })?; - let proof_hex = hex::encode(proof.to_bytes()); - std::fs::write(&output_file, proof_hex).map_err(|e| { + // Save aggregated proof using helper function + write_proof_file(&output_file, &aggregated_proof.proof.to_bytes()).map_err(|e| { crate::error::QuantusError::Generic(format!("Failed to write proof: {}", e)) })?; - log_success!("Proof generated successfully!"); + log_success!("Aggregation complete!"); log_success!("Output: {}", output_file); - log_verbose!("Public inputs: {:?}", public_inputs); + log_print!( + "Aggregated {} proofs into 1 proof with {} exit accounts", + proof_files.len(), + aggregated_public_inputs.account_data.len() + ); Ok(()) } -async fn at_best_block( +#[derive(Debug, Clone, Copy)] +enum IncludedAt { + Best, + Finalized, +} + +impl IncludedAt { + fn label(self) -> &'static str { + match self { + IncludedAt::Best => "best block", + IncludedAt::Finalized => "finalized block", + } + } +} + +fn read_hex_proof_file_to_bytes(proof_file: &str) -> crate::error::Result> { + let proof_hex = std::fs::read_to_string(proof_file).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to read proof file: {}", e)) + })?; + + let proof_bytes = hex::decode(proof_hex.trim()) + .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to decode hex: {}", e)))?; + + Ok(proof_bytes) +} + +/// Submit unsigned verify_aggregated_proof(proof_bytes) and return (included_at, block_hash, +/// tx_hash). 
+async fn submit_unsigned_verify_aggregated_proof( quantus_client: &QuantusClient, -) -> anyhow::Result>> { - let best_block = quantus_client.get_latest_block().await?; - let block = quantus_client.client().blocks().at(best_block).await?; - Ok(block) + proof_bytes: Vec, +) -> crate::error::Result<(IncludedAt, subxt::utils::H256, subxt::utils::H256)> { + use subxt::tx::TxStatus; + + let verify_tx = quantus_node::api::tx().wormhole().verify_aggregated_proof(proof_bytes); + + let unsigned_tx = quantus_client.client().tx().create_unsigned(&verify_tx).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to create unsigned tx: {}", e)) + })?; + + let mut tx_progress = unsigned_tx + .submit_and_watch() + .await + .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to submit tx: {}", e)))?; + + while let Some(Ok(status)) = tx_progress.next().await { + match status { + TxStatus::InBestBlock(tx_in_block) => { + return Ok(( + IncludedAt::Best, + tx_in_block.block_hash(), + tx_in_block.extrinsic_hash(), + )); + }, + TxStatus::InFinalizedBlock(tx_in_block) => { + return Ok(( + IncludedAt::Finalized, + tx_in_block.block_hash(), + tx_in_block.extrinsic_hash(), + )); + }, + TxStatus::Error { message } | TxStatus::Invalid { message } => { + return Err(crate::error::QuantusError::Generic(format!( + "Transaction failed: {}", + message + ))); + }, + _ => continue, + } + } + + Err(crate::error::QuantusError::Generic("Transaction stream ended unexpectedly".to_string())) } -async fn aggregate_proofs( - proof_files: Vec, - output_file: String, - depth: usize, - branching_factor: usize, +/// Collect wormhole events for our extrinsic (by tx_hash) in a given block. +/// Returns (found_proof_verified, native_transfers). 
+async fn collect_wormhole_events_for_extrinsic( + quantus_client: &QuantusClient, + block_hash: subxt::utils::H256, + tx_hash: subxt::utils::H256, +) -> crate::error::Result<(bool, Vec)> { + use crate::chain::quantus_subxt::api::system::events::ExtrinsicFailed; + + let block = + quantus_client.client().blocks().at(block_hash).await.map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to get block: {}", e)) + })?; + + let events = block + .events() + .await + .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to get events: {}", e)))?; + + let extrinsics = block.extrinsics().await.map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to get extrinsics: {}", e)) + })?; + + let our_ext_idx = extrinsics + .iter() + .enumerate() + .find(|(_, ext)| ext.hash() == tx_hash) + .map(|(idx, _)| idx as u32) + .ok_or_else(|| { + crate::error::QuantusError::Generic( + "Could not find submitted extrinsic in included block".to_string(), + ) + })?; + + let mut transfer_events = Vec::new(); + let mut found_proof_verified = false; + + log_verbose!(" Events for our extrinsic (idx={}):", our_ext_idx); + + for event_result in events.iter() { + let event = event_result.map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to decode event: {}", e)) + })?; + + if let subxt::events::Phase::ApplyExtrinsic(ext_idx) = event.phase() { + if ext_idx == our_ext_idx { + log_print!(" Event: {}::{}", event.pallet_name(), event.variant_name()); + + // Decode ExtrinsicFailed to get the specific error + if let Ok(Some(ExtrinsicFailed { dispatch_error, .. 
})) = + event.as_event::() + { + let metadata = quantus_client.client().metadata(); + let error_msg = format_dispatch_error(&dispatch_error, &metadata); + log_print!(" DispatchError: {}", error_msg); + } + + if let Ok(Some(_)) = event.as_event::() { + found_proof_verified = true; + } + + if let Ok(Some(transfer)) = event.as_event::() + { + transfer_events.push(transfer); + } + } + } + } + + Ok((found_proof_verified, transfer_events)) +} + +async fn verify_aggregated_proof(proof_file: String, node_url: &str) -> crate::error::Result<()> { + log_print!("Verifying aggregated wormhole proof on-chain..."); + + let proof_bytes = read_hex_proof_file_to_bytes(&proof_file)?; + log_verbose!("Aggregated proof size: {} bytes", proof_bytes.len()); + + // Connect to node + let quantus_client = QuantusClient::new(node_url) + .await + .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to connect: {}", e)))?; + log_verbose!("Connected to node"); + + log_verbose!("Submitting unsigned aggregated verification transaction..."); + + let (included_at, block_hash, tx_hash) = + submit_unsigned_verify_aggregated_proof(&quantus_client, proof_bytes).await?; + + // One unified check (no best/finalized copy-paste) + let result = check_proof_verification_events( + quantus_client.client(), + &block_hash, + &tx_hash, + crate::log::is_verbose(), + ) + .await?; + + if result.success { + log_success!("Aggregated proof verified successfully on-chain!"); + if let Some(amount) = result.exit_amount { + log_success!("Total exit amount: {}", format_balance(amount)); + } + + log_verbose!("Included in {}: {:?}", included_at.label(), block_hash); + return Ok(()); + } + + let error_msg = result.error_message.unwrap_or_else(|| { + "Aggregated proof verification failed - no ProofVerified event found".to_string() + }); + log_error!("❌ {}", error_msg); + Err(crate::error::QuantusError::Generic(error_msg)) +} + +// ============================================================================ +// 
Multi-round wormhole flow implementation +// ============================================================================ + +/// Information about a transfer needed for proof generation +#[derive(Debug, Clone)] +#[allow(dead_code)] +struct TransferInfo { + /// Block hash where the transfer was included + block_hash: subxt::utils::H256, + /// Transfer count for this specific transfer + transfer_count: u64, + /// Amount transferred + amount: u128, + /// The wormhole address (destination of transfer) + wormhole_address: SubxtAccountId, + /// The funding account (source of transfer) + funding_account: SubxtAccountId, +} + +/// Derive a wormhole secret using HD derivation +/// Path: m/44'/189189189'/0'/round'/index' +fn derive_wormhole_secret( + mnemonic: &str, + round: usize, + index: usize, +) -> Result { + // QUANTUS_WORMHOLE_CHAIN_ID already includes the ' (e.g., "189189189'") + let path = format!("m/44'/{}/0'/{}'/{}'", QUANTUS_WORMHOLE_CHAIN_ID, round, index); + derive_wormhole_from_mnemonic(mnemonic, None, &path) + .map_err(|e| crate::error::QuantusError::Generic(format!("HD derivation failed: {:?}", e))) +} + +/// Calculate the amount for a given round, accounting for fees +/// Each round deducts 0.1% fee (10 bps) +/// Round 1: fee applied once, Round 2: fee applied twice, etc. 
+fn calculate_round_amount(initial_amount: u128, round: usize) -> u128 { + let mut amount = initial_amount; + for _ in 0..round { + // Output = Input * (10000 - 10) / 10000 + amount = amount * 9990 / 10000; + } + amount +} + +/// Get the minting account from chain constants +async fn get_minting_account( + client: &OnlineClient, +) -> Result { + let minting_account_addr = quantus_node::api::constants().wormhole().minting_account(); + let minting_account = client.constants().at(&minting_account_addr).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to get minting account: {}", e)) + })?; + Ok(minting_account) +} + +/// Parse transfer info from NativeTransferred events in a block and updates block hash for all +/// transfers +fn parse_transfer_events( + events: &[wormhole::events::NativeTransferred], + expected_addresses: &[SubxtAccountId], + block_hash: subxt::utils::H256, +) -> Result, crate::error::QuantusError> { + let mut transfer_infos = Vec::new(); + + for expected_addr in expected_addresses { + // Find the event matching this address + let matching_event = events.iter().find(|e| &e.to == expected_addr).ok_or_else(|| { + crate::error::QuantusError::Generic(format!( + "No transfer event found for address {:?}", + expected_addr + )) + })?; + + transfer_infos.push(TransferInfo { + block_hash, + transfer_count: matching_event.transfer_count, + amount: matching_event.amount, + wormhole_address: expected_addr.clone(), + funding_account: matching_event.from.clone(), + }); + } + + Ok(transfer_infos) +} + +/// Configuration for multiround execution +struct MultiroundConfig { + num_proofs: usize, + rounds: usize, + amount: u128, + output_dir: String, + keep_files: bool, +} + +/// Wallet context for multiround execution +struct MultiroundWalletContext { + wallet_name: String, + wallet_address: String, + wallet_account_id: SubxtAccountId, + keypair: QuantumKeyPair, + mnemonic: String, +} + +/// Validate multiround parameters +fn 
validate_multiround_params( + num_proofs: usize, + rounds: usize, + max_proofs: usize, ) -> crate::error::Result<()> { - use qp_wormhole_aggregator::{ - aggregator::WormholeProofAggregator, circuits::tree::TreeAggregationConfig, + if !(1..=max_proofs).contains(&num_proofs) { + return Err(crate::error::QuantusError::Generic(format!( + "num_proofs must be between 1 and {} (got: {})", + max_proofs, num_proofs + ))); + } + if rounds < 1 { + return Err(crate::error::QuantusError::Generic(format!( + "rounds must be at least 1 (got: {})", + rounds + ))); + } + Ok(()) +} + +/// Load wallet and prepare context for multiround execution +fn load_multiround_wallet( + wallet_name: &str, + password: Option, + password_file: Option, +) -> crate::error::Result { + let wallet_manager = WalletManager::new()?; + let wallet_password = password::get_wallet_password(wallet_name, password, password_file)?; + let wallet_data = wallet_manager.load_wallet(wallet_name, &wallet_password)?; + let wallet_address = wallet_data.keypair.to_account_id_ss58check(); + let wallet_account_id = SubxtAccountId(wallet_data.keypair.to_account_id_32().into()); + + // Get or generate mnemonic for HD derivation + let mnemonic = match wallet_data.mnemonic { + Some(m) => { + log_verbose!("Using wallet mnemonic for HD derivation"); + m + }, + None => { + log_print!("Wallet has no mnemonic - generating random mnemonic for wormhole secrets"); + let mut entropy = [0u8; 32]; + rand::rng().fill_bytes(&mut entropy); + let sensitive_entropy = SensitiveBytes32::from(&mut entropy); + let m = generate_mnemonic(sensitive_entropy).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to generate mnemonic: {:?}", e)) + })?; + log_verbose!("Generated mnemonic (not saved): {}", m); + m + }, }; - log_print!("Aggregating {} proofs...", proof_files.len()); + Ok(MultiroundWalletContext { + wallet_name: wallet_name.to_string(), + wallet_address, + wallet_account_id, + keypair: wallet_data.keypair, + mnemonic, + }) +} 
- // Validate aggregation parameters using helper function - let max_leaf_proofs = validate_aggregation_params(proof_files.len(), depth, branching_factor) - .map_err(crate::error::QuantusError::Generic)?; +/// Print multiround configuration summary +fn print_multiround_config( + config: &MultiroundConfig, + wallet: &MultiroundWalletContext, + agg_config: &AggregationConfig, +) { + use colored::Colorize; + + log_print!("{}", "Configuration:".bright_cyan()); + log_print!(" Wallet: {}", wallet.wallet_name); + log_print!(" Wallet address: {}", wallet.wallet_address); + log_print!( + " Total amount: {} ({}) - randomly partitioned across {} proofs", + config.amount, + format_balance(config.amount), + config.num_proofs + ); + log_print!(" Proofs per round: {}", config.num_proofs); + log_print!(" Rounds: {}", config.rounds); + log_print!(" Aggregation: num_leaf_proofs={}", agg_config.num_leaf_proofs); + log_print!(" Output directory: {}", config.output_dir); + log_print!(" Keep files: {}", config.keep_files); + log_print!(""); + + // Show expected amounts per round + log_print!("{}", "Expected amounts per round:".bright_cyan()); + for r in 1..=config.rounds { + let round_amount = calculate_round_amount(config.amount, r); + log_print!(" Round {}: {} ({})", r, round_amount, format_balance(round_amount)); + } + log_print!(""); +} - // Build the wormhole verifier to get circuit data for parsing proofs - let config = CircuitConfig::standard_recursion_zk_config(); - let verifier = WormholeVerifier::new(config.clone(), None); - let common_data = verifier.circuit_data.common.clone(); +/// Execute initial transfers from wallet to wormhole addresses (round 1 only). +/// +/// Sends all transfers in a single batched extrinsic using `utility.batch()`, +/// then parses the `NativeTransferred` events to extract transfer info for proof generation. 
+async fn execute_initial_transfers( + quantus_client: &QuantusClient, + wallet: &MultiroundWalletContext, + secrets: &[WormholePair], + amount: u128, + num_proofs: usize, +) -> crate::error::Result> { + use colored::Colorize; + use quantus_node::api::runtime_types::{ + pallet_balances::pallet::Call as BalancesCall, quantus_runtime::RuntimeCall, + }; - // Configure aggregation - let aggregation_config = TreeAggregationConfig::new(branching_factor, depth as u32); + log_print!("{}", "Step 1: Sending batched transfer to wormhole addresses...".bright_yellow()); - log_verbose!( - "Aggregation config: branching_factor={}, depth={}, num_leaf_proofs={}", - aggregation_config.tree_branching_factor, - aggregation_config.tree_depth, - max_leaf_proofs - ); + // Randomly partition the total amount among proofs + // Each partition must meet the on-chain minimum transfer amount + // Minimum per partition is 0.02 DEV (2 quantized units) to ensure non-trivial amounts + let partition_amounts = random_partition(amount, num_proofs, 2 * SCALE_DOWN_FACTOR); + log_print!(" Random partition of {} ({}):", amount, format_balance(amount)); + for (i, &amt) in partition_amounts.iter().enumerate() { + log_print!(" Proof {}: {} ({})", i + 1, amt, format_balance(amt)); + } - // Create aggregator - let mut aggregator = - WormholeProofAggregator::new(verifier.circuit_data).with_config(aggregation_config); + // Build batch of transfer calls + let mut calls = Vec::with_capacity(num_proofs); + for (i, secret) in secrets.iter().enumerate() { + let wormhole_address = SubxtAccountId(secret.address); + let transfer_call = RuntimeCall::Balances(BalancesCall::transfer_allow_death { + dest: subxt::ext::subxt_core::utils::MultiAddress::Id(wormhole_address), + value: partition_amounts[i], + }); + calls.push(transfer_call); + } - // Load and add proofs using helper function - for (idx, proof_file) in proof_files.iter().enumerate() { - log_verbose!("Loading proof {}/{}: {}", idx + 1, proof_files.len(), 
proof_file); + let batch_tx = quantus_node::api::tx().utility().batch(calls); - let proof_bytes = read_proof_file(proof_file).map_err(|e| { - crate::error::QuantusError::Generic(format!("Failed to load {}: {}", proof_file, e)) + let quantum_keypair = QuantumKeyPair { + public_key: wallet.keypair.public_key.clone(), + private_key: wallet.keypair.private_key.clone(), + }; + + log_print!(" Submitting batch of {} transfers...", num_proofs); + + submit_transaction( + quantus_client, + &quantum_keypair, + batch_tx, + None, + ExecutionMode { finalized: false, wait_for_transaction: true }, + ) + .await + .map_err(|e| crate::error::QuantusError::Generic(format!("Batch transfer failed: {}", e)))?; + + // Get the block and find all NativeTransferred events + let client = quantus_client.client(); + let block = at_best_block(quantus_client) + .await + .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to get block: {}", e)))?; + let block_hash = block.hash(); + + let events_api = + client.events().at(block_hash).await.map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to get events: {}", e)) })?; - let proof = ProofWithPublicInputs::::from_bytes(proof_bytes, &common_data) - .map_err(|e| { + // Match each secret's wormhole address to its NativeTransferred event + let funding_account: SubxtAccountId = SubxtAccountId(wallet.keypair.to_account_id_32().into()); + let mut transfers = Vec::with_capacity(num_proofs); + + for (i, secret) in secrets.iter().enumerate() { + let event = events_api + .find::() + .find(|e| if let Ok(evt) = e { evt.to.0 == secret.address } else { false }) + .ok_or_else(|| { crate::error::QuantusError::Generic(format!( - "Failed to deserialize proof from {}: {}", - proof_file, e + "No NativeTransferred event found for wormhole address {} (proof {})", + hex::encode(secret.address), + i + 1 )) + })? 
+ .map_err(|e| { + crate::error::QuantusError::Generic(format!("Event decode error: {}", e)) })?; - aggregator.push_proof(proof).map_err(|e| { - crate::error::QuantusError::Generic(format!("Failed to add proof: {}", e)) + transfers.push(TransferInfo { + block_hash, + transfer_count: event.transfer_count, + amount: partition_amounts[i], + wormhole_address: SubxtAccountId(secret.address), + funding_account: funding_account.clone(), + }); + } + + log_success!( + " {} transfers submitted in a single batch (block {})", + num_proofs, + hex::encode(block_hash.0) + ); + + Ok(transfers) +} + +/// Generate proofs for a round with random output partitioning +async fn generate_round_proofs( + quantus_client: &QuantusClient, + secrets: &[WormholePair], + transfers: &[TransferInfo], + exit_accounts: &[SubxtAccountId], + round_dir: &str, + num_proofs: usize, +) -> crate::error::Result> { + use colored::Colorize; + + log_print!("{}", "Step 2: Generating proofs...".bright_yellow()); + + // All proofs in an aggregation batch must use the same block for storage proofs. 
+ let proof_block = at_best_block(quantus_client) + .await + .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to get block: {}", e)))?; + let proof_block_hash = proof_block.hash(); + log_print!(" Using block {} for all proofs", hex::encode(proof_block_hash.0)); + + // Collect input amounts and exit accounts for random assignment + let input_amounts: Vec = transfers.iter().map(|t| t.amount).collect(); + let exit_account_bytes: Vec<[u8; 32]> = exit_accounts.iter().map(|a| a.0).collect(); + + // Compute random output assignments (each proof can have 2 outputs) + let output_assignments = + compute_random_output_assignments(&input_amounts, &exit_account_bytes, VOLUME_FEE_BPS); + + // Log the random partition + log_print!(" Random output partition:"); + for (i, assignment) in output_assignments.iter().enumerate() { + let amt1_planck = (assignment.output_amount_1 as u128) * SCALE_DOWN_FACTOR; + if assignment.output_amount_2 > 0 { + let amt2_planck = (assignment.output_amount_2 as u128) * SCALE_DOWN_FACTOR; + log_print!( + " Proof {}: {} ({}) -> 0x{}..., {} ({}) -> 0x{}...", + i + 1, + assignment.output_amount_1, + format_balance(amt1_planck), + hex::encode(&assignment.exit_account_1[..4]), + assignment.output_amount_2, + format_balance(amt2_planck), + hex::encode(&assignment.exit_account_2[..4]) + ); + } else { + log_print!( + " Proof {}: {} ({}) -> 0x{}...", + i + 1, + assignment.output_amount_1, + format_balance(amt1_planck), + hex::encode(&assignment.exit_account_1[..4]) + ); + } + } + + let pb = ProgressBar::new(num_proofs as u64); + pb.set_style( + ProgressStyle::default_bar() + .template("{spinner:.green} [{bar:40.cyan/blue}] {pos}/{len} {msg}") + .unwrap() + .progress_chars("#>-"), + ); + + let proof_gen_start = std::time::Instant::now(); + let mut proof_files = Vec::new(); + for (i, (secret, transfer)) in secrets.iter().zip(transfers.iter()).enumerate() { + pb.set_message(format!("Proof {}/{}", i + 1, num_proofs)); + + let proof_file = 
format!("{}/proof_{}.hex", round_dir, i + 1); + + // Use the funding account from the transfer info + let funding_account_hex = format!("0x{}", hex::encode(transfer.funding_account.0)); + + let single_start = std::time::Instant::now(); + + // Generate proof with dual output assignment + generate_proof( + &hex::encode(secret.secret), + transfer.amount, // Use actual transfer amount for storage key + &output_assignments[i], + &format!("0x{}", hex::encode(proof_block_hash.0)), + transfer.transfer_count, + &funding_account_hex, + &proof_file, + quantus_client, + ) + .await?; + + let single_elapsed = single_start.elapsed(); + log_verbose!(" Proof {} generated in {:.2}s", i + 1, single_elapsed.as_secs_f64()); + + proof_files.push(proof_file); + pb.inc(1); + } + pb.finish_with_message("Proofs generated"); + let proof_gen_elapsed = proof_gen_start.elapsed(); + log_print!( + " Proof generation: {:.2}s ({} proofs, {:.2}s avg)", + proof_gen_elapsed.as_secs_f64(), + num_proofs, + proof_gen_elapsed.as_secs_f64() / num_proofs as f64, + ); + + Ok(proof_files) +} + +/// Derive wormhole secrets for a round +fn derive_round_secrets( + mnemonic: &str, + round: usize, + num_proofs: usize, +) -> crate::error::Result> { + let pb = ProgressBar::new(num_proofs as u64); + pb.set_style( + ProgressStyle::default_bar() + .template("{spinner:.green} [{bar:40.cyan/blue}] {pos}/{len} {msg}") + .unwrap() + .progress_chars("#>-"), + ); + pb.set_message("Deriving secrets..."); + + let mut secrets = Vec::new(); + for i in 1..=num_proofs { + let secret = derive_wormhole_secret(mnemonic, round, i)?; + secrets.push(secret); + pb.inc(1); + } + pb.finish_with_message("Secrets derived"); + + Ok(secrets) +} + +/// Verify final balance and print summary +fn verify_final_balance( + initial_balance: u128, + final_balance: u128, + total_sent: u128, + rounds: usize, + num_proofs: usize, +) { + use colored::Colorize; + + log_print!("{}", "Balance Verification:".bright_cyan()); + + // Total received in final 
round: apply fee deduction for each round + let total_received = calculate_round_amount(total_sent, rounds); + + // Expected net change (may be negative due to fees) + let expected_change = total_received as i128 - total_sent as i128; + let actual_change = final_balance as i128 - initial_balance as i128; + + log_print!(" Initial balance: {} ({})", initial_balance, format_balance(initial_balance)); + log_print!(" Final balance: {} ({})", final_balance, format_balance(final_balance)); + log_print!(""); + log_print!(" Total sent (round 1): {} ({})", total_sent, format_balance(total_sent)); + log_print!( + " Total received (round {}): {} ({})", + rounds, + total_received, + format_balance(total_received) + ); + log_print!(""); + + // Format signed amounts for display + let expected_change_str = if expected_change >= 0 { + format!("+{}", expected_change) + } else { + format!("{}", expected_change) + }; + let actual_change_str = if actual_change >= 0 { + format!("+{}", actual_change) + } else { + format!("{}", actual_change) + }; + + log_print!(" Expected change: {} planck", expected_change_str); + log_print!(" Actual change: {} planck", actual_change_str); + log_print!(""); + + // Allow some tolerance for transaction fees + let tolerance = (total_sent / 100).max(1_000_000_000_000); // 1% or 1 QNT minimum + + let diff = (actual_change - expected_change).unsigned_abs(); + if diff <= tolerance { + log_success!( + " {} Balance verification PASSED (within tolerance of {} planck)", + "✓".bright_green(), + tolerance + ); + } else { + log_print!( + " {} Balance verification: difference of {} planck (tolerance: {} planck)", + "!".bright_yellow(), + diff, + tolerance + ); + log_print!( + " Note: Transaction fees for {} initial transfers may account for the difference", + num_proofs + ); + } + log_print!(""); +} + +/// Run the multi-round wormhole flow +#[allow(clippy::too_many_arguments)] +async fn run_multiround( + num_proofs: usize, + rounds: usize, + amount: u128, + 
wallet_name: String, + password: Option, + password_file: Option, + keep_files: bool, + output_dir: String, + dry_run: bool, + node_url: &str, +) -> crate::error::Result<()> { + use colored::Colorize; + + log_print!(""); + log_print!("=================================================="); + log_print!(" Wormhole Multi-Round Flow Test"); + log_print!("=================================================="); + log_print!(""); + + // Load aggregation config from generated-bins/config.json + let agg_config = AggregationConfig::load_from_bins()?; + + // Validate parameters + validate_multiround_params(num_proofs, rounds, agg_config.num_leaf_proofs)?; + + // Load wallet + let wallet = load_multiround_wallet(&wallet_name, password, password_file)?; + + // Create config struct + let config = + MultiroundConfig { num_proofs, rounds, amount, output_dir: output_dir.clone(), keep_files }; + + // Print configuration + print_multiround_config(&config, &wallet, &agg_config); + log_print!(" Dry run: {}", dry_run); + log_print!(""); + + // Create output directory + std::fs::create_dir_all(&output_dir).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to create output directory: {}", e)) + })?; + + if dry_run { + return run_multiround_dry_run( + &wallet.mnemonic, + num_proofs, + rounds, + amount, + &wallet.wallet_address, + ); + } + + // Connect to node + let quantus_client = QuantusClient::new(node_url).await.map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to connect to node: {}", e)) + })?; + let client = quantus_client.client(); + + // Get minting account from chain + let minting_account = get_minting_account(client).await?; + log_verbose!("Minting account: {:?}", minting_account); + + // Record initial wallet balance for verification + let initial_balance = get_balance(&quantus_client, &wallet.wallet_address).await?; + log_print!("{}", "Initial Balance:".bright_cyan()); + log_print!(" Wallet balance: {} ({})", initial_balance, 
format_balance(initial_balance)); + log_print!(""); + + // Track transfer info for the current round + let mut current_transfers: Vec = Vec::new(); + + for round in 1..=rounds { + let is_final = round == rounds; + + log_print!(""); + log_print!("--------------------------------------------------"); + log_print!( + " {} Round {} of {} {}", + ">>>".bright_blue(), + round, + rounds, + "<<<".bright_blue() + ); + log_print!("--------------------------------------------------"); + log_print!(""); + + // Create round output directory + let round_dir = format!("{}/round{}", output_dir, round); + std::fs::create_dir_all(&round_dir).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to create round directory: {}", e)) })?; + + // Derive secrets for this round + let secrets = derive_round_secrets(&wallet.mnemonic, round, num_proofs)?; + + // Determine exit accounts + let exit_accounts: Vec = if is_final { + log_print!("Final round - all proofs exit to wallet: {}", wallet.wallet_address); + vec![wallet.wallet_account_id.clone(); num_proofs] + } else { + log_print!( + "Intermediate round - proofs exit to round {} wormhole addresses", + round + 1 + ); + let mut addrs = Vec::new(); + for i in 1..=num_proofs { + let next_secret = derive_wormhole_secret(&wallet.mnemonic, round + 1, i)?; + addrs.push(SubxtAccountId(next_secret.address)); + } + addrs + }; + + // Step 1: Get transfer info (execute transfers for round 1, reuse from previous round + // otherwise) + if round == 1 { + current_transfers = + execute_initial_transfers(&quantus_client, &wallet, &secrets, amount, num_proofs) + .await?; + + // Log balance immediately after funding transfers + let balance_after_funding = + get_balance(&quantus_client, &wallet.wallet_address).await?; + let funding_deducted = initial_balance.saturating_sub(balance_after_funding); + log_print!( + " Balance after funding: {} ({}) [deducted: {} planck]", + balance_after_funding, + format_balance(balance_after_funding), + 
funding_deducted + ); + } else { + log_print!("{}", "Step 1: Using transfer info from previous round...".bright_yellow()); + log_print!(" Found {} transfer(s) from previous round", current_transfers.len()); + } + + // Step 2: Generate proofs with random output partitioning + let proof_files = generate_round_proofs( + &quantus_client, + &secrets, + ¤t_transfers, + &exit_accounts, + &round_dir, + num_proofs, + ) + .await?; + + // Step 3: Aggregate proofs + log_print!("{}", "Step 3: Aggregating proofs...".bright_yellow()); + + let aggregated_file = format!("{}/aggregated.hex", round_dir); + aggregate_proofs(proof_files, aggregated_file.clone()).await?; + + log_print!(" Aggregated proof saved to {}", aggregated_file); + + // Step 4: Verify aggregated proof on-chain + log_print!("{}", "Step 4: Submitting aggregated proof on-chain...".bright_yellow()); + + let (verification_block, transfer_events) = + verify_aggregated_and_get_events(&aggregated_file, &quantus_client).await?; + + log_print!( + " {} Proof verified in block {}", + "✓".bright_green(), + hex::encode(verification_block.0) + ); + + // If not final round, prepare transfer info for next round + if !is_final { + log_print!("{}", "Step 5: Capturing transfer info for next round...".bright_yellow()); + + // Parse events to get transfer info for next round's wormhole addresses + let next_round_addresses: Vec = (1..=num_proofs) + .map(|i| { + let next_secret = + derive_wormhole_secret(&wallet.mnemonic, round + 1, i).unwrap(); + SubxtAccountId(next_secret.address) + }) + .collect(); + + current_transfers = + parse_transfer_events(&transfer_events, &next_round_addresses, verification_block)?; + + log_print!( + " Captured {} transfer(s) for round {}", + current_transfers.len(), + round + 1 + ); + } + + // Log balance after this round + let balance_after_round = get_balance(&quantus_client, &wallet.wallet_address).await?; + let change_from_initial = balance_after_round as i128 - initial_balance as i128; + let change_str = 
if change_from_initial >= 0 { + format!("+{}", change_from_initial) + } else { + format!("{}", change_from_initial) + }; + log_print!(""); + log_print!( + " Balance after round {}: {} ({}) [change: {} planck]", + round, + balance_after_round, + format_balance(balance_after_round), + change_str + ); + + log_print!(""); + log_print!(" {} Round {} complete!", "✓".bright_green(), round); } - log_print!("Running aggregation..."); - let aggregated_proof = aggregator - .aggregate() - .map_err(|e| crate::error::QuantusError::Generic(format!("Aggregation failed: {}", e)))?; + log_print!(""); + log_print!("=================================================="); + log_success!(" All {} rounds completed successfully!", rounds); + log_print!("=================================================="); + log_print!(""); - // Parse and display aggregated public inputs - let aggregated_public_inputs = AggregatedPublicCircuitInputs::try_from_slice( - aggregated_proof.proof.public_inputs.as_slice(), - ) - .map_err(|e| { - crate::error::QuantusError::Generic(format!( - "Failed to parse aggregated public inputs: {}", - e - )) + // Final balance verification + let final_balance = get_balance(&quantus_client, &wallet.wallet_address).await?; + verify_final_balance(initial_balance, final_balance, amount, rounds, num_proofs); + + if keep_files { + log_print!("Proof files preserved in: {}", output_dir); + } else { + log_print!("Cleaning up proof files..."); + std::fs::remove_dir_all(&output_dir).ok(); + } + + Ok(()) +} + +/// Generate a wormhole proof with dual outputs (used for random partitioning in multiround) +async fn generate_proof( + secret_hex: &str, + funding_amount: u128, + output_assignment: &ProofOutputAssignment, + block_hash_str: &str, + transfer_count: u64, + funding_account_str: &str, + output_file: &str, + quantus_client: &QuantusClient, +) -> crate::error::Result<()> { + // Parse secret + let secret_array = 
parse_secret_hex(secret_hex).map_err(crate::error::QuantusError::Generic)?; + let secret: BytesDigest = secret_array.try_into().map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to convert secret: {:?}", e)) })?; - log_verbose!("Aggregated public inputs: {:#?}", aggregated_public_inputs); + // Parse funding account + let funding_account_bytes = + parse_exit_account(funding_account_str).map_err(crate::error::QuantusError::Generic)?; + let funding_account = AccountId32::new(funding_account_bytes); + + // Parse block hash + let hash_bytes = hex::decode(block_hash_str.trim_start_matches("0x")) + .map_err(|e| crate::error::QuantusError::Generic(format!("Invalid block hash: {}", e)))?; + if hash_bytes.len() != 32 { + return Err(crate::error::QuantusError::Generic(format!( + "Block hash must be 32 bytes, got {}", + hash_bytes.len() + ))); + } + let hash: [u8; 32] = hash_bytes.try_into().unwrap(); + let block_hash = subxt::utils::H256::from(hash); + + let client = quantus_client.client(); + + // Generate unspendable account from secret + let unspendable_account = + qp_wormhole_circuit::unspendable_account::UnspendableAccount::from_secret(secret) + .account_id; + let unspendable_account_bytes_digest = + qp_zk_circuits_common::utils::digest_felts_to_bytes(unspendable_account); + let unspendable_account_bytes: [u8; 32] = unspendable_account_bytes_digest + .as_ref() + .try_into() + .expect("BytesDigest is always 32 bytes"); + + let from_account = funding_account.clone(); + let to_account = AccountId32::new(unspendable_account_bytes); + + // Get block + let blocks = + client.blocks().at(block_hash).await.map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to get block: {}", e)) + })?; + + // Build storage key + let leaf_hash = qp_poseidon::PoseidonHasher::hash_storage::( + &( + NATIVE_ASSET_ID, + transfer_count, + from_account.clone(), + to_account.clone(), + funding_amount, + ) + .encode(), + ); + + let proof_address = 
quantus_node::api::storage().wormhole().transfer_proof(( + NATIVE_ASSET_ID, + transfer_count, + SubxtAccountId(from_account.clone().into()), + SubxtAccountId(to_account.clone().into()), + funding_amount, + )); + + let mut final_key = proof_address.to_root_bytes(); + final_key.extend_from_slice(&leaf_hash); + + // Verify storage key exists + let storage_api = client.storage().at(block_hash); + let val = storage_api + .fetch_raw(final_key.clone()) + .await + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + if val.is_none() { + return Err(crate::error::QuantusError::Generic( + "Storage key not found - transfer may not exist in this block".to_string(), + )); + } + + // Get storage proof + let proof_params = rpc_params![vec![to_hex(&final_key)], block_hash]; + let read_proof: ReadProof = quantus_client + .rpc_client() + .request("state_getReadProof", proof_params) + .await + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + + let header = blocks.header(); + let state_root = BytesDigest::try_from(header.state_root.as_bytes()) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + let parent_hash = BytesDigest::try_from(header.parent_hash.as_bytes()) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + let extrinsics_root = BytesDigest::try_from(header.extrinsics_root.as_bytes()) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + let digest = + header.digest.encode().try_into().map_err(|_| { + crate::error::QuantusError::Generic("Failed to encode digest".to_string()) + })?; + let block_number = header.number; + + // Prepare storage proof + let processed_storage_proof = prepare_proof_for_circuit( + read_proof.proof.iter().map(|proof| proof.0.clone()).collect(), + hex::encode(header.state_root.0), + leaf_hash, + ) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?; + + // Quantize input amount + let input_amount_quantized: u32 = + 
quantize_funding_amount(funding_amount).map_err(crate::error::QuantusError::Generic)?; + + // Use the output assignment directly (already quantized) + let inputs = CircuitInputs { + private: PrivateCircuitInputs { + secret, + transfer_count, + funding_account: BytesDigest::try_from(funding_account.as_ref() as &[u8]) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?, + storage_proof: processed_storage_proof, + unspendable_account: unspendable_account_bytes_digest, + parent_hash, + state_root, + extrinsics_root, + digest, + input_amount: input_amount_quantized, + }, + public: PublicCircuitInputs { + output_amount_1: output_assignment.output_amount_1, + output_amount_2: output_assignment.output_amount_2, + volume_fee_bps: VOLUME_FEE_BPS, + nullifier: digest_felts_to_bytes(Nullifier::from_preimage(secret, transfer_count).hash), + exit_account_1: BytesDigest::try_from(output_assignment.exit_account_1.as_ref()) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?, + exit_account_2: BytesDigest::try_from(output_assignment.exit_account_2.as_ref()) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?, + block_hash: BytesDigest::try_from(block_hash.as_ref()) + .map_err(|e| crate::error::QuantusError::Generic(e.to_string()))?, + block_number, + asset_id: NATIVE_ASSET_ID, + }, + }; - // Verify the aggregated proof locally - log_verbose!("Verifying aggregated proof locally..."); - aggregated_proof - .circuit_data - .verify(aggregated_proof.proof.clone()) - .map_err(|e| { - crate::error::QuantusError::Generic(format!( - "Aggregated proof verification failed: {}", - e - )) - })?; + // Load prover from pre-built bins + let bins_dir = Path::new("generated-bins"); + let prover = + WormholeProver::new_from_files(&bins_dir.join("prover.bin"), &bins_dir.join("common.bin")) + .map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to load prover: {}", e)) + })?; + let prover_next = prover + .commit(&inputs) + .map_err(|e| 
crate::error::QuantusError::Generic(e.to_string()))?; + let proof: ProofWithPublicInputs<_, _, 2> = prover_next.prove().map_err(|e| { + crate::error::QuantusError::Generic(format!("Proof generation failed: {}", e)) + })?; - // Save aggregated proof using helper function - write_proof_file(&output_file, &aggregated_proof.proof.to_bytes()).map_err(|e| { + let proof_hex = hex::encode(proof.to_bytes()); + std::fs::write(output_file, proof_hex).map_err(|e| { crate::error::QuantusError::Generic(format!("Failed to write proof: {}", e)) })?; - log_success!("Aggregation complete!"); - log_success!("Output: {}", output_file); - log_print!( - "Aggregated {} proofs into 1 proof with {} exit accounts", - proof_files.len(), - aggregated_public_inputs.account_data.len() - ); - Ok(()) } -async fn verify_aggregated_proof(proof_file: String, node_url: &str) -> crate::error::Result<()> { - use subxt::tx::TxStatus; +/// Verify an aggregated proof and return the block hash and transfer events +async fn verify_aggregated_and_get_events( + proof_file: &str, + quantus_client: &QuantusClient, +) -> crate::error::Result<(subxt::utils::H256, Vec)> { + use qp_wormhole_verifier::WormholeVerifier; - log_print!("Verifying aggregated wormhole proof on-chain..."); + let proof_bytes = read_hex_proof_file_to_bytes(proof_file)?; - // Read proof from file - let proof_hex = std::fs::read_to_string(&proof_file).map_err(|e| { - crate::error::QuantusError::Generic(format!("Failed to read proof file: {}", e)) + // Verify locally before submitting on-chain + log_verbose!("Verifying aggregated proof locally before on-chain submission..."); + let bins_dir = Path::new("generated-bins"); + let verifier = WormholeVerifier::new_from_files( + &bins_dir.join("aggregated_verifier.bin"), + &bins_dir.join("aggregated_common.bin"), + ) + .map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to load aggregated verifier: {}", e)) })?; - let proof_bytes = hex::decode(proof_hex.trim()) - .map_err(|e| 
crate::error::QuantusError::Generic(format!("Failed to decode hex: {}", e)))?; + let proof = qp_wormhole_verifier::ProofWithPublicInputs::< + qp_wormhole_verifier::F, + qp_wormhole_verifier::C, + { qp_wormhole_verifier::D }, + >::from_bytes(proof_bytes.clone(), &verifier.circuit_data.common) + .map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to deserialize aggregated proof: {}", + e + )) + })?; - log_verbose!("Aggregated proof size: {} bytes", proof_bytes.len()); + verifier.verify(proof).map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Local aggregated proof verification failed: {}", + e + )) + })?; + log_verbose!("Local verification passed!"); - // Connect to node - let quantus_client = QuantusClient::new(node_url) - .await - .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to connect: {}", e)))?; + // Submit unsigned tx + wait for inclusion (best or finalized) + let (included_at, block_hash, tx_hash) = + submit_unsigned_verify_aggregated_proof(quantus_client, proof_bytes).await?; - log_verbose!("Connected to node"); + log_verbose!( + "Submitted tx included in {}: block={:?}, tx={:?}", + included_at.label(), + block_hash, + tx_hash + ); - // Create the verify_aggregated_proof transaction payload - let verify_tx = quantus_node::api::tx().wormhole().verify_aggregated_proof(proof_bytes); + // Collect events for our extrinsic only + let (found_proof_verified, transfer_events) = + collect_wormhole_events_for_extrinsic(quantus_client, block_hash, tx_hash).await?; - log_verbose!("Submitting unsigned aggregated verification transaction..."); + if !found_proof_verified { + return Err(crate::error::QuantusError::Generic( + "Proof verification failed - no ProofVerified event".to_string(), + )); + } - // Submit as unsigned extrinsic - let unsigned_tx = quantus_client.client().tx().create_unsigned(&verify_tx).map_err(|e| { - crate::error::QuantusError::Generic(format!("Failed to create unsigned tx: {}", e)) - })?; + // Log 
minted amounts + log_print!(" Tokens minted (from NativeTransferred events):"); + for (idx, transfer) in transfer_events.iter().enumerate() { + let to_hex = hex::encode(transfer.to.0); + log_print!( + " [{}] {} -> {} planck ({})", + idx, + to_hex, + transfer.amount, + format_balance(transfer.amount) + ); + } - let mut tx_progress = unsigned_tx - .submit_and_watch() - .await - .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to submit tx: {}", e)))?; + Ok((block_hash, transfer_events)) +} - // Wait for transaction inclusion - while let Some(Ok(status)) = tx_progress.next().await { - log_verbose!("Transaction status: {:?}", status); - match status { - TxStatus::InBestBlock(tx_in_block) => { - let block_hash = tx_in_block.block_hash(); - log_success!("Aggregated proof verified successfully on-chain!"); - log_verbose!("Included in block: {:?}", block_hash); - return Ok(()); - }, - TxStatus::InFinalizedBlock(tx_in_block) => { - let block_hash = tx_in_block.block_hash(); - log_success!("Aggregated proof verified successfully on-chain!"); - log_verbose!("Finalized in block: {:?}", block_hash); - return Ok(()); - }, - TxStatus::Error { message } | TxStatus::Invalid { message } => { - return Err(crate::error::QuantusError::Generic(format!( - "Transaction failed: {}", - message - ))); - }, - _ => continue, +/// Dry run - show what would happen without executing +fn run_multiround_dry_run( + mnemonic: &str, + num_proofs: usize, + rounds: usize, + amount: u128, + wallet_address: &str, +) -> crate::error::Result<()> { + use colored::Colorize; + + log_print!(""); + log_print!("{}", "=== DRY RUN MODE ===".bright_yellow()); + log_print!("No transactions will be executed."); + log_print!(""); + + for round in 1..=rounds { + let is_final = round == rounds; + let round_amount = calculate_round_amount(amount, round); + + log_print!(""); + log_print!("{}", format!("Round {}", round).bright_cyan()); + log_print!(" Total amount: {} ({})", round_amount, 
format_balance(round_amount)); + + // Show sample random partition for round 1 + if round == 1 { + let partition = random_partition(amount, num_proofs, 2 * SCALE_DOWN_FACTOR); + log_print!(" Sample random partition (actual partition will differ):"); + for (i, &amt) in partition.iter().enumerate() { + log_print!(" Proof {}: {} ({})", i + 1, amt, format_balance(amt)); + } + } + log_print!(""); + + log_print!(" Wormhole addresses (to be funded):"); + for i in 1..=num_proofs { + let secret = derive_wormhole_secret(mnemonic, round, i)?; + let address = sp_core::crypto::AccountId32::new(secret.address) + .to_ss58check_with_version(sp_core::crypto::Ss58AddressFormat::custom(189)); + log_print!(" [{}] {}", i, address); + log_verbose!(" secret: 0x{}", hex::encode(secret.secret)); + } + + log_print!(""); + log_print!(" Exit accounts:"); + if is_final { + log_print!(" All proofs exit to wallet: {}", wallet_address); + } else { + for i in 1..=num_proofs { + let next_secret = derive_wormhole_secret(mnemonic, round + 1, i)?; + let address = sp_core::crypto::AccountId32::new(next_secret.address) + .to_ss58check_with_version(sp_core::crypto::Ss58AddressFormat::custom(189)); + log_print!(" [{}] {} (round {} wormhole)", i, address, round + 1); + } } } - Err(crate::error::QuantusError::Generic("Transaction stream ended unexpectedly".to_string())) + log_print!(""); + log_print!("{}", "=== END DRY RUN ===".bright_yellow()); + log_print!(""); + + Ok(()) } -async fn verify_proof(proof_file: String, node_url: &str) -> crate::error::Result<()> { - use subxt::tx::TxStatus; +/// Parse and display the contents of a proof file for debugging +async fn parse_proof_file( + proof_file: String, + aggregated: bool, + verify: bool, +) -> crate::error::Result<()> { + use qp_wormhole_verifier::WormholeVerifier; - log_print!("Verifying wormhole proof on-chain..."); + log_print!("Parsing proof file: {}", proof_file); - // Read proof from file - let proof_hex = 
std::fs::read_to_string(&proof_file).map_err(|e| { - crate::error::QuantusError::Generic(format!("Failed to read proof file: {}", e)) - })?; + // Read proof bytes + let proof_bytes = read_proof_file(&proof_file) + .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to read proof: {}", e)))?; - let proof_bytes = hex::decode(proof_hex.trim()) - .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to decode hex: {}", e)))?; + log_print!("Proof size: {} bytes", proof_bytes.len()); - log_verbose!("Proof size: {} bytes", proof_bytes.len()); + let bins_dir = Path::new("generated-bins"); - // Connect to node - let quantus_client = QuantusClient::new(node_url) - .await - .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to connect: {}", e)))?; + if aggregated { + // Load aggregated verifier + let verifier = WormholeVerifier::new_from_files( + &bins_dir.join("aggregated_verifier.bin"), + &bins_dir.join("aggregated_common.bin"), + ) + .map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to load verifier: {}", e)) + })?; - log_verbose!("Connected to node"); + // Deserialize proof using verifier's types + let proof = qp_wormhole_verifier::ProofWithPublicInputs::< + qp_wormhole_verifier::F, + qp_wormhole_verifier::C, + { qp_wormhole_verifier::D }, + >::from_bytes(proof_bytes.clone(), &verifier.circuit_data.common) + .map_err(|e| { + crate::error::QuantusError::Generic(format!( + "Failed to deserialize aggregated proof: {:?}", + e + )) + })?; - // Create the verify transaction payload - let verify_tx = quantus_node::api::tx().wormhole().verify_wormhole_proof(proof_bytes); + log_print!("\nPublic inputs count: {}", proof.public_inputs.len()); + log_verbose!("\nPublic inputs count: {}", proof.public_inputs.len()); + + // Try to parse as aggregated + match qp_wormhole_verifier::parse_aggregated_public_inputs(&proof) { + Ok(agg_inputs) => { + log_print!("\n=== Parsed Aggregated Public Inputs ==="); + log_print!("Asset ID: {}", 
agg_inputs.asset_id); + log_print!("Volume Fee BPS: {}", agg_inputs.volume_fee_bps); + log_print!( + "Block Hash: 0x{}", + hex::encode(agg_inputs.block_data.block_hash.as_ref()) + ); + log_print!("Block Number: {}", agg_inputs.block_data.block_number); + log_print!("\nAccount Data ({} accounts):", agg_inputs.account_data.len()); + for (i, acct) in agg_inputs.account_data.iter().enumerate() { + log_print!( + " [{}] amount={}, exit=0x{}", + i, + acct.summed_output_amount, + hex::encode(acct.exit_account.as_ref()) + ); + } + log_print!("\nNullifiers ({} nullifiers):", agg_inputs.nullifiers.len()); + for (i, nullifier) in agg_inputs.nullifiers.iter().enumerate() { + log_print!(" [{}] 0x{}", i, hex::encode(nullifier.as_ref())); + } + }, + Err(e) => { + log_print!("Failed to parse as aggregated inputs: {}", e); + }, + } - log_verbose!("Submitting unsigned verification transaction..."); + // Verify if requested + if verify { + log_print!("\n=== Verifying Proof ==="); + match verifier.verify(proof) { + Ok(()) => { + log_success!("Proof verification PASSED"); + }, + Err(e) => { + log_error!("Proof verification FAILED: {}", e); + return Err(crate::error::QuantusError::Generic(format!( + "Proof verification failed: {}", + e + ))); + }, + } + } + } else { + // Load leaf verifier + let verifier = WormholeVerifier::new_from_files( + &bins_dir.join("verifier.bin"), + &bins_dir.join("common.bin"), + ) + .map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to load verifier: {}", e)) + })?; - // Submit as unsigned extrinsic - let unsigned_tx = quantus_client.client().tx().create_unsigned(&verify_tx).map_err(|e| { - crate::error::QuantusError::Generic(format!("Failed to create unsigned tx: {}", e)) - })?; + // Deserialize proof using verifier's types + let proof = qp_wormhole_verifier::ProofWithPublicInputs::< + qp_wormhole_verifier::F, + qp_wormhole_verifier::C, + { qp_wormhole_verifier::D }, + >::from_bytes(proof_bytes, &verifier.circuit_data.common) + .map_err(|e| 
{ + crate::error::QuantusError::Generic(format!("Failed to deserialize proof: {:?}", e)) + })?; - let mut tx_progress = unsigned_tx - .submit_and_watch() - .await - .map_err(|e| crate::error::QuantusError::Generic(format!("Failed to submit tx: {}", e)))?; + log_print!("\nPublic inputs count: {}", proof.public_inputs.len()); - // Wait for transaction inclusion - while let Some(Ok(status)) = tx_progress.next().await { - log_verbose!("Transaction status: {:?}", status); - match status { - TxStatus::InBestBlock(tx_in_block) => { - let block_hash = tx_in_block.block_hash(); - log_success!("Proof verified successfully on-chain!"); - log_verbose!("Included in block: {:?}", block_hash); - return Ok(()); - }, - TxStatus::InFinalizedBlock(tx_in_block) => { - let block_hash = tx_in_block.block_hash(); - log_success!("Proof verified successfully on-chain!"); - log_verbose!("Finalized in block: {:?}", block_hash); - return Ok(()); - }, - TxStatus::Error { message } | TxStatus::Invalid { message } => { - return Err(crate::error::QuantusError::Generic(format!( - "Transaction failed: {}", - message - ))); - }, - _ => continue, + let pi = qp_wormhole_verifier::parse_public_inputs(&proof).map_err(|e| { + crate::error::QuantusError::Generic(format!("Failed to parse public inputs: {}", e)) + })?; + + log_print!("\n=== Parsed Leaf Public Inputs ==="); + log_print!("Asset ID: {}", pi.asset_id); + log_print!("Output Amount 1: {}", pi.output_amount_1); + log_print!("Output Amount 2: {}", pi.output_amount_2); + log_print!("Volume Fee BPS: {}", pi.volume_fee_bps); + log_print!("Nullifier: 0x{}", hex::encode(pi.nullifier.as_ref())); + log_print!("Exit Account 1: 0x{}", hex::encode(pi.exit_account_1.as_ref())); + log_print!("Exit Account 2: 0x{}", hex::encode(pi.exit_account_2.as_ref())); + log_print!("Block Hash: 0x{}", hex::encode(pi.block_hash.as_ref())); + log_print!("Block Number: {}", pi.block_number); + + // Verify if requested + if verify { + log_print!("\n=== Verifying Proof ==="); + 
match verifier.verify(proof) { + Ok(()) => { + log_success!("Proof verification PASSED"); + }, + Err(e) => { + log_error!("Proof verification FAILED: {}", e); + return Err(crate::error::QuantusError::Generic(format!( + "Proof verification failed: {}", + e + ))); + }, + } } } - Err(crate::error::QuantusError::Generic("Transaction stream ended unexpectedly".to_string())) + Ok(()) } #[cfg(test)] mod tests { use super::*; - use plonky2::plonk::circuit_data::CircuitConfig; - use qp_wormhole_circuit::inputs::{ - AggregatedPublicCircuitInputs, CircuitInputs, PublicCircuitInputs, - }; - use qp_wormhole_prover::WormholeProver; - use qp_wormhole_test_helpers::TestInputs; - use qp_wormhole_verifier::WormholeVerifier; + use std::collections::HashSet; use tempfile::NamedTempFile; - /// Helper to get a standard circuit config for tests - fn test_circuit_config() -> CircuitConfig { - CircuitConfig::standard_recursion_zk_config() - } - #[test] fn test_compute_output_amount() { // 0.1% fee (10 bps): output = input * 9990 / 10000 @@ -790,21 +2377,6 @@ mod tests { .contains("too large")); } - #[test] - fn test_validate_aggregation_params() { - // Valid configurations - assert_eq!(validate_aggregation_params(2, 1, 2).unwrap(), 2); - assert_eq!(validate_aggregation_params(9, 2, 3).unwrap(), 9); // 3^2 = 9 - - // Invalid: no proofs, bad branching factor, zero depth - assert!(validate_aggregation_params(0, 1, 2).unwrap_err().contains("No proofs")); - assert!(validate_aggregation_params(2, 1, 1).unwrap_err().contains("Branching factor")); - assert!(validate_aggregation_params(2, 0, 2).unwrap_err().contains("Depth")); - - // Too many proofs for tree size - assert!(validate_aggregation_params(3, 1, 2).unwrap_err().contains("Too many proofs")); - } - #[test] fn test_proof_file_roundtrip() { let temp_file = NamedTempFile::new().unwrap(); @@ -920,269 +2492,325 @@ mod tests { assert_ne!(account1.account_id, account_different.account_id); } - /// Integration test: Generate a real ZK proof using 
test fixtures and verify it - #[test] - #[ignore] // This test is slow (~30s) - run with `cargo test -- --ignored` - fn test_full_proof_generation_and_verification() { - // Use test fixtures from qp-wormhole-test-helpers - let inputs = CircuitInputs::test_inputs_0(); - - // Verify the test inputs have correct fee configuration - assert_eq!(inputs.public.volume_fee_bps, VOLUME_FEE_BPS); - assert_eq!(inputs.public.asset_id, NATIVE_ASSET_ID); - - // Verify fee constraint is satisfied in test inputs - let input_amount = inputs.private.input_amount; - let output_amount = inputs.public.output_amount; - assert!( - (output_amount as u64) * 10000 <= - (input_amount as u64) * (10000 - VOLUME_FEE_BPS as u64), - "Test inputs violate fee constraint" - ); - - // Create prover and generate proof - let config = test_circuit_config(); - let prover = WormholeProver::new(config.clone()); - let prover_committed = prover.commit(&inputs).expect("Failed to commit inputs"); - let proof = prover_committed.prove().expect("Failed to generate proof"); - - // Parse and verify public inputs from proof - let parsed_public_inputs = - PublicCircuitInputs::try_from(&proof).expect("Failed to parse public inputs"); + // Note: Integration tests for proof generation, serialization, aggregation, and + // multi-account aggregation have been moved to qp-zk-circuits/wormhole/tests/ + // where the test-helpers crate (with TestInputs) is available as a workspace dep. 
- assert_eq!(parsed_public_inputs.asset_id, inputs.public.asset_id); - assert_eq!(parsed_public_inputs.output_amount, inputs.public.output_amount); - assert_eq!(parsed_public_inputs.volume_fee_bps, inputs.public.volume_fee_bps); - assert_eq!(parsed_public_inputs.nullifier, inputs.public.nullifier); - assert_eq!(parsed_public_inputs.exit_account, inputs.public.exit_account); - assert_eq!(parsed_public_inputs.block_hash, inputs.public.block_hash); - assert_eq!(parsed_public_inputs.parent_hash, inputs.public.parent_hash); - assert_eq!(parsed_public_inputs.block_number, inputs.public.block_number); + /// Test that public inputs parsing matches expected structure + #[test] + fn test_public_inputs_structure() { + use qp_wormhole_inputs::{ + ASSET_ID_INDEX, BLOCK_HASH_END_INDEX, BLOCK_HASH_START_INDEX, BLOCK_NUMBER_INDEX, + EXIT_ACCOUNT_1_END_INDEX, EXIT_ACCOUNT_1_START_INDEX, EXIT_ACCOUNT_2_END_INDEX, + EXIT_ACCOUNT_2_START_INDEX, NULLIFIER_END_INDEX, NULLIFIER_START_INDEX, + OUTPUT_AMOUNT_1_INDEX, OUTPUT_AMOUNT_2_INDEX, PUBLIC_INPUTS_FELTS_LEN, + VOLUME_FEE_BPS_INDEX, + }; - // Create verifier and verify proof - let verifier = WormholeVerifier::new(config, None); - verifier.verify(proof).expect("Proof verification failed"); + // Verify expected public inputs layout for dual-output circuit + assert_eq!(PUBLIC_INPUTS_FELTS_LEN, 21, "Public inputs should be 21 field elements"); + assert_eq!(ASSET_ID_INDEX, 0, "Asset ID should be first"); + assert_eq!(OUTPUT_AMOUNT_1_INDEX, 1, "Output amount 1 should be at index 1"); + assert_eq!(OUTPUT_AMOUNT_2_INDEX, 2, "Output amount 2 should be at index 2"); + assert_eq!(VOLUME_FEE_BPS_INDEX, 3, "Volume fee BPS should be at index 3"); + assert_eq!(NULLIFIER_START_INDEX, 4, "Nullifier should start at index 4"); + assert_eq!(NULLIFIER_END_INDEX, 8, "Nullifier should end at index 8"); + assert_eq!(EXIT_ACCOUNT_1_START_INDEX, 8, "Exit account 1 should start at index 8"); + assert_eq!(EXIT_ACCOUNT_1_END_INDEX, 12, "Exit account 1 should end 
at index 12"); + assert_eq!(EXIT_ACCOUNT_2_START_INDEX, 12, "Exit account 2 should start at index 12"); + assert_eq!(EXIT_ACCOUNT_2_END_INDEX, 16, "Exit account 2 should end at index 16"); + assert_eq!(BLOCK_HASH_START_INDEX, 16, "Block hash should start at index 16"); + assert_eq!(BLOCK_HASH_END_INDEX, 20, "Block hash should end at index 20"); + assert_eq!(BLOCK_NUMBER_INDEX, 20, "Block number should be at index 20"); } - /// Integration test: Generate proof, serialize/deserialize, then verify + /// Test that constants match expected on-chain configuration #[test] - #[ignore] // This test is slow - run with `cargo test -- --ignored` - fn test_proof_serialization_roundtrip() { - let inputs = CircuitInputs::test_inputs_0(); - let config = test_circuit_config(); - - // Generate proof - let prover = WormholeProver::new(config.clone()); - let proof = prover.commit(&inputs).unwrap().prove().unwrap(); - - // Serialize to bytes - let proof_bytes = proof.to_bytes(); - - // Write to temp file and read back - let temp_file = NamedTempFile::new().unwrap(); - let path = temp_file.path().to_str().unwrap(); - write_proof_file(path, &proof_bytes).unwrap(); - let read_bytes = read_proof_file(path).unwrap(); + fn test_constants_match_chain_config() { + // Volume fee rate should be 10 bps (0.1%) + assert_eq!(VOLUME_FEE_BPS, 10, "Volume fee should be 10 bps"); - assert_eq!(proof_bytes, read_bytes, "Proof bytes should match after file roundtrip"); + // Native asset ID should be 0 + assert_eq!(NATIVE_ASSET_ID, 0, "Native asset ID should be 0"); - // Deserialize and verify - let verifier = WormholeVerifier::new(config, None); - let deserialized_proof = plonky2::plonk::proof::ProofWithPublicInputs::< - qp_zk_circuits_common::circuit::F, - qp_zk_circuits_common::circuit::C, - { qp_zk_circuits_common::circuit::D }, - >::from_bytes(read_bytes, &verifier.circuit_data.common) - .expect("Failed to deserialize proof"); + // Scale down factor should be 10^10 (12 decimals -> 2 decimals) + 
assert_eq!(SCALE_DOWN_FACTOR, 10_000_000_000, "Scale down factor should be 10^10"); - verifier - .verify(deserialized_proof) - .expect("Deserialized proof verification failed"); + // Verify scale down: 1 token with 12 decimals = 10^12 units + // After quantization: 10^12 / 10^10 = 100 (which is 1.00 in 2 decimal places) + let one_token_12_decimals: u128 = 1_000_000_000_000; + let quantized = quantize_funding_amount(one_token_12_decimals).unwrap(); + assert_eq!(quantized, 100, "1 token should quantize to 100 (1.00 with 2 decimals)"); } - /// Integration test: Generate multiple proofs with different inputs #[test] - #[ignore] // This test is slow - run with `cargo test -- --ignored` - fn test_multiple_proof_generation() { - let config = test_circuit_config(); - - // Generate proofs for both test input sets - let inputs_0 = CircuitInputs::test_inputs_0(); - let inputs_1 = CircuitInputs::test_inputs_1(); - - let prover_0 = WormholeProver::new(config.clone()); - let proof_0 = prover_0.commit(&inputs_0).unwrap().prove().unwrap(); - - let prover_1 = WormholeProver::new(config.clone()); - let proof_1 = prover_1.commit(&inputs_1).unwrap().prove().unwrap(); - - // Verify both proofs - let verifier = WormholeVerifier::new(config, None); - verifier.verify(proof_0.clone()).expect("Proof 0 verification failed"); - verifier.verify(proof_1.clone()).expect("Proof 1 verification failed"); - - // Verify public inputs are different (different nullifiers, etc.) 
- let public_0 = PublicCircuitInputs::try_from(&proof_0).unwrap(); - let public_1 = PublicCircuitInputs::try_from(&proof_1).unwrap(); - - assert_ne!(public_0.nullifier, public_1.nullifier, "Nullifiers should be different"); - assert_ne!(public_0.block_hash, public_1.block_hash, "Block hashes should be different"); + fn test_volume_fee_bps_constant() { + // Ensure VOLUME_FEE_BPS matches expected value (10 bps = 0.1%) + assert_eq!(VOLUME_FEE_BPS, 10); } - /// Integration test: Aggregate proofs and verify aggregated proof #[test] - #[ignore] // This test is slow (~60s) - run with `cargo test -- --ignored` - fn test_proof_aggregation() { - use qp_wormhole_aggregator::aggregator::WormholeProofAggregator; - - let config = test_circuit_config(); - - // Generate a proof - let inputs = CircuitInputs::test_inputs_0(); - let prover = WormholeProver::new(config.clone()); - let proof = prover.commit(&inputs).unwrap().prove().unwrap(); + fn test_aggregation_config_deserialization_matches_upstream_format() { + // This test verifies that our local AggregationConfig struct can deserialize + // the same JSON format that the upstream CircuitBinsConfig produces. + // If the upstream adds/removes/renames fields, this test will catch it. 
+ let json = r#"{ + "num_leaf_proofs": 8, + "hashes": { + "common": "aabbcc", + "verifier": "ddeeff", + "prover": "112233", + "aggregated_common": "445566", + "aggregated_verifier": "778899" + } + }"#; + + let config: AggregationConfig = serde_json::from_str(json).unwrap(); + assert_eq!(config.num_leaf_proofs, 8); + + let hashes = config.hashes.unwrap(); + assert_eq!(hashes.prover.as_deref(), Some("112233")); + assert_eq!(hashes.aggregated_common.as_deref(), Some("445566")); + assert_eq!(hashes.aggregated_verifier.as_deref(), Some("778899")); + } - // Create aggregator with default config (branching_factor=2, depth=1) - let verifier = WormholeVerifier::new(config, None); - let mut aggregator = WormholeProofAggregator::new(verifier.circuit_data); + fn mk_accounts(n: usize) -> Vec<[u8; 32]> { + (0..n) + .map(|i| { + let mut a = [0u8; 32]; + a[0] = (i as u8).wrapping_add(1); // avoid [0;32] + a + }) + .collect() + } - // Add proof to aggregator - aggregator.push_proof(proof.clone()).expect("Failed to push proof"); + fn proof_outputs_for_inputs(input_amounts: &[u128], fee_bps: u32) -> Vec { + input_amounts + .iter() + .map(|&input| { + let input_quantized = quantize_funding_amount(input).unwrap_or(0); + compute_output_amount(input_quantized, fee_bps) + }) + .collect() + } - // Aggregate - let aggregated_result = aggregator.aggregate().expect("Aggregation failed"); + fn total_output_for_inputs(input_amounts: &[u128], fee_bps: u32) -> u64 { + proof_outputs_for_inputs(input_amounts, fee_bps) + .into_iter() + .map(|x| x as u64) + .sum() + } - // Parse aggregated public inputs - let aggregated_public_inputs = AggregatedPublicCircuitInputs::try_from_slice( - aggregated_result.proof.public_inputs.as_slice(), - ) - .expect("Failed to parse aggregated public inputs"); + /// Find some input that yields at least `min_out` quantized output after fee. + /// Keeps tests robust even if quantization constants change. 
+ fn find_input_for_min_output(fee_bps: u32, min_out: u32) -> u128 { + let mut input: u128 = 1; + for _ in 0..80 { + let q = quantize_funding_amount(input).unwrap_or(0); + let out = compute_output_amount(q, fee_bps); + if out >= min_out { + return input; + } + // grow fast but safely + input = input.saturating_mul(10); + } + panic!("Could not find input producing output >= {}", min_out); + } - // Verify aggregated proof structure - assert_eq!(aggregated_public_inputs.asset_id, NATIVE_ASSET_ID, "Asset ID should be native"); - assert_eq!( - aggregated_public_inputs.volume_fee_bps, VOLUME_FEE_BPS, - "Volume fee BPS should match" - ); - assert!( - !aggregated_public_inputs.nullifiers.is_empty(), - "Should have at least one nullifier" - ); - assert!( - !aggregated_public_inputs.account_data.is_empty(), - "Should have at least one account" - ); + // -------------------------- + // random_partition tests + // -------------------------- - // Verify the aggregated proof locally - aggregated_result - .circuit_data - .verify(aggregated_result.proof) - .expect("Aggregated proof verification failed"); + #[test] + fn random_partition_n0() { + let parts = random_partition(100, 0, 1); + assert!(parts.is_empty()); } - /// Integration test: Aggregate multiple proofs with different exit accounts #[test] - #[ignore] // This test is very slow (~120s) - run with `cargo test -- --ignored` - fn test_proof_aggregation_multiple_accounts() { - use qp_wormhole_aggregator::aggregator::WormholeProofAggregator; - - let config = test_circuit_config(); - - // Generate proofs with different inputs (different exit accounts) - let inputs_0 = CircuitInputs::test_inputs_0(); - let inputs_1 = CircuitInputs::test_inputs_1(); - - let prover_0 = WormholeProver::new(config.clone()); - let proof_0 = prover_0.commit(&inputs_0).unwrap().prove().unwrap(); + fn random_partition_n1() { + let total = 12345u128; + let parts = random_partition(total, 1, 9999); + assert_eq!(parts, vec![total]); + } - let prover_1 = 
WormholeProver::new(config.clone()); - let proof_1 = prover_1.commit(&inputs_1).unwrap().prove().unwrap(); + #[test] + fn random_partition_total_less_than_min_total_falls_back_to_equalish() { + // total < min_per_part * n => fallback path + let total = 5u128; + let n = 10usize; + let min_per_part = 1u128; - // Create aggregator - let verifier = WormholeVerifier::new(config, None); - let mut aggregator = WormholeProofAggregator::new(verifier.circuit_data); + let parts = random_partition(total, n, min_per_part); - // Add both proofs - aggregator.push_proof(proof_0).expect("Failed to push proof 0"); - aggregator.push_proof(proof_1).expect("Failed to push proof 1"); + assert_eq!(parts.len(), n); + assert_eq!(parts.iter().sum::(), total); - // Aggregate - let aggregated_result = aggregator.aggregate().expect("Aggregation failed"); + // fallback behavior: per_part=0, remainder=5, last gets remainder + for part in parts.iter().take(n - 1) { + assert_eq!(*part, 0); + } + assert_eq!(parts[n - 1], 5); + } - // Parse aggregated public inputs - let aggregated_public_inputs = AggregatedPublicCircuitInputs::try_from_slice( - aggregated_result.proof.public_inputs.as_slice(), - ) - .expect("Failed to parse aggregated public inputs"); + #[test] + fn random_partition_min_achievable_invariants_hold() { + let total = 100u128; + let n = 10usize; + let min_per_part = 3u128; + + for _ in 0..200 { + let parts = random_partition(total, n, min_per_part); + assert_eq!(parts.len(), n); + assert_eq!(parts.iter().sum::(), total); + assert!(parts.iter().all(|&p| p >= min_per_part)); + } + } - // Verify we have 2 nullifiers (one per proof) - assert_eq!( - aggregated_public_inputs.nullifiers.len(), - 2, - "Should have 2 nullifiers for 2 proofs" - ); + #[test] + fn random_partition_distributable_zero_all_min() { + let n = 10usize; + let min_per_part = 3u128; + let total = min_per_part * n as u128; - // Verify all nullifiers are unique - assert_ne!( - aggregated_public_inputs.nullifiers[0], 
aggregated_public_inputs.nullifiers[1], - "Nullifiers should be unique" - ); + let parts = random_partition(total, n, min_per_part); - // Verify the aggregated proof - aggregated_result - .circuit_data - .verify(aggregated_result.proof) - .expect("Aggregated proof verification failed"); + assert_eq!(parts.len(), n); + assert_eq!(parts.iter().sum::(), total); + assert!(parts.iter().all(|&p| p == min_per_part)); } - /// Test that public inputs parsing matches expected structure + // -------------------------- + // compute_random_output_assignments tests + // -------------------------- + #[test] - fn test_public_inputs_structure() { - use qp_wormhole_circuit::inputs::{ - ASSET_ID_INDEX, BLOCK_HASH_END_INDEX, BLOCK_HASH_START_INDEX, BLOCK_NUMBER_INDEX, - EXIT_ACCOUNT_END_INDEX, EXIT_ACCOUNT_START_INDEX, NULLIFIER_END_INDEX, - NULLIFIER_START_INDEX, OUTPUT_AMOUNT_INDEX, PARENT_HASH_END_INDEX, - PARENT_HASH_START_INDEX, PUBLIC_INPUTS_FELTS_LEN, VOLUME_FEE_BPS_INDEX, - }; + fn compute_random_output_assignments_empty_inputs_or_targets() { + let targets = mk_accounts(3); + assert!(compute_random_output_assignments(&[], &targets, 0).is_empty()); - // Verify expected public inputs layout - assert_eq!(PUBLIC_INPUTS_FELTS_LEN, 20, "Public inputs should be 20 field elements"); - assert_eq!(ASSET_ID_INDEX, 0, "Asset ID should be first"); - assert_eq!(OUTPUT_AMOUNT_INDEX, 1, "Output amount should be second"); - assert_eq!(VOLUME_FEE_BPS_INDEX, 2, "Volume fee BPS should be third"); - assert_eq!(NULLIFIER_START_INDEX, 3, "Nullifier should start at index 3"); - assert_eq!(NULLIFIER_END_INDEX, 7, "Nullifier should end at index 7"); - assert_eq!(EXIT_ACCOUNT_START_INDEX, 7, "Exit account should start at index 7"); - assert_eq!(EXIT_ACCOUNT_END_INDEX, 11, "Exit account should end at index 11"); - assert_eq!(BLOCK_HASH_START_INDEX, 11, "Block hash should start at index 11"); - assert_eq!(BLOCK_HASH_END_INDEX, 15, "Block hash should end at index 15"); - assert_eq!(PARENT_HASH_START_INDEX, 
15, "Parent hash should start at index 15"); - assert_eq!(PARENT_HASH_END_INDEX, 19, "Parent hash should end at index 19"); - assert_eq!(BLOCK_NUMBER_INDEX, 19, "Block number should be at index 19"); + let inputs = vec![1u128, 2u128, 3u128]; + assert!(compute_random_output_assignments(&inputs, &[], 0).is_empty()); } - /// Test that constants match expected on-chain configuration #[test] - fn test_constants_match_chain_config() { - // Volume fee rate should be 10 bps (0.1%) - assert_eq!(VOLUME_FEE_BPS, 10, "Volume fee should be 10 bps"); + fn compute_random_output_assignments_basic_invariants() { + let fee_bps = 0u32; + + // ensure non-zero outputs for meaningful checks + let input = find_input_for_min_output(fee_bps, 5); + let input_amounts = vec![input, input, input, input, input]; + let targets = mk_accounts(4); + + let assignments = compute_random_output_assignments(&input_amounts, &targets, fee_bps); + assert_eq!(assignments.len(), input_amounts.len()); + + let proof_outputs = proof_outputs_for_inputs(&input_amounts, fee_bps); + + // per-proof sum matches + for (i, a) in assignments.iter().enumerate() { + let per_proof_sum = a.output_amount_1 as u64 + a.output_amount_2 as u64; + assert_eq!(per_proof_sum, proof_outputs[i] as u64); + + // If an output amount is non-zero, the account must be in targets + if a.output_amount_1 > 0 { + assert!(targets.contains(&a.exit_account_1)); + } else { + // if amount is zero, account can be zero or anything; current impl keeps default + // [0;32] + } + if a.output_amount_2 > 0 { + assert!(targets.contains(&a.exit_account_2)); + assert_ne!(a.exit_account_2, a.exit_account_1); // should be different if both used + } else { + // current impl keeps default [0;32] + assert_eq!(a.exit_account_2, [0u8; 32]); + } + } - // Native asset ID should be 0 - assert_eq!(NATIVE_ASSET_ID, 0, "Native asset ID should be 0"); + // total sum matches + let total_assigned: u64 = assignments + .iter() + .map(|a| a.output_amount_1 as u64 + 
a.output_amount_2 as u64) + .sum(); - // Scale down factor should be 10^10 (12 decimals -> 2 decimals) - assert_eq!(SCALE_DOWN_FACTOR, 10_000_000_000, "Scale down factor should be 10^10"); + let total_expected = total_output_for_inputs(&input_amounts, fee_bps); + assert_eq!(total_assigned, total_expected); + } - // Verify scale down: 1 token with 12 decimals = 10^12 units - // After quantization: 10^12 / 10^10 = 100 (which is 1.00 in 2 decimal places) - let one_token_12_decimals: u128 = 1_000_000_000_000; - let quantized = quantize_funding_amount(one_token_12_decimals).unwrap(); - assert_eq!(quantized, 100, "1 token should quantize to 100 (1.00 with 2 decimals)"); + #[test] + fn compute_random_output_assignments_more_targets_than_capacity_still_conserves_funds() { + // Capacity: each proof can hit at most 2 targets, so distinct-used-targets <= 2 * + // num_proofs. Set num_targets > 2*num_proofs and ensure total_output >= num_targets so + // the partition *wants* to give each target >= 1 (though algorithm can't satisfy it). 
+ let fee_bps = 0u32; + let num_proofs = 1usize; + let num_targets = 5usize; + + let input = find_input_for_min_output(fee_bps, 10); // ensure total_output is "big enough" + let input_amounts = vec![input; num_proofs]; + let targets = mk_accounts(num_targets); + + let assignments = compute_random_output_assignments(&input_amounts, &targets, fee_bps); + assert_eq!(assignments.len(), num_proofs); + + // total preserved + let total_assigned: u64 = assignments + .iter() + .map(|a| a.output_amount_1 as u64 + a.output_amount_2 as u64) + .sum(); + let total_expected = total_output_for_inputs(&input_amounts, fee_bps); + assert_eq!(total_assigned, total_expected); + + // used targets bounded by 2*num_proofs and thus < num_targets + let mut used = HashSet::new(); + for a in &assignments { + if a.output_amount_1 > 0 { + used.insert(a.exit_account_1); + } + if a.output_amount_2 > 0 { + used.insert(a.exit_account_2); + } + } + assert!(used.len() <= 2 * num_proofs); + assert!(used.len() < num_targets); } #[test] - fn test_volume_fee_bps_constant() { - // Ensure VOLUME_FEE_BPS matches expected value (10 bps = 0.1%) - assert_eq!(VOLUME_FEE_BPS, 10); + fn compute_random_output_assignments_total_output_less_than_num_targets_does_not_panic_and_conserves( + ) { + // This forces random_partition into its fallback branch inside + // compute_random_output_assignments because min_per_target = 1 and total_output < + // num_targets. + let fee_bps = 0u32; + + let num_targets = 50usize; + let targets = mk_accounts(num_targets); + + // Try to get very small total output: two proofs with output likely >= 1 each, + // but still far less than 50. 
+ let input = find_input_for_min_output(fee_bps, 1); + let input_amounts = vec![input, input]; + + let assignments = compute_random_output_assignments(&input_amounts, &targets, fee_bps); + assert_eq!(assignments.len(), input_amounts.len()); + + let total_assigned: u64 = assignments + .iter() + .map(|a| a.output_amount_1 as u64 + a.output_amount_2 as u64) + .sum(); + let total_expected = total_output_for_inputs(&input_amounts, fee_bps); + assert_eq!(total_assigned, total_expected); + + // For each assignment: if non-zero amount then account must be in targets. + for a in &assignments { + if a.output_amount_1 > 0 { + assert!(targets.contains(&a.exit_account_1)); + } + if a.output_amount_2 > 0 { + assert!(targets.contains(&a.exit_account_2)); + assert_ne!(a.exit_account_2, a.exit_account_1); + } + } } } diff --git a/src/main.rs b/src/main.rs index 684b112..c467064 100644 --- a/src/main.rs +++ b/src/main.rs @@ -72,15 +72,22 @@ async fn main() -> Result<(), QuantusError> { wait_for_transaction: cli.wait_for_transaction, }; - // Execute the command - match cli::execute_command(cli.command, &cli.node_url, cli.verbose, execution_mode).await { + // Execute the command with timing + let start_time = std::time::Instant::now(); + let result = + cli::execute_command(cli.command, &cli.node_url, cli.verbose, execution_mode).await; + let elapsed = start_time.elapsed(); + + match result { Ok(_) => { log_verbose!(""); log_verbose!("Command executed successfully!"); + log_print!("⏱️ Completed in {:.2}s", elapsed.as_secs_f64()); Ok(()) }, Err(e) => { log_error!("{}", e); + log_print!("⏱️ Failed after {:.2}s", elapsed.as_secs_f64()); std::process::exit(1); }, } diff --git a/src/quantus_metadata.scale b/src/quantus_metadata.scale index 910bcbc..f55447c 100644 Binary files a/src/quantus_metadata.scale and b/src/quantus_metadata.scale differ diff --git a/src/wallet/keystore.rs b/src/wallet/keystore.rs index aeed76b..752a8f9 100644 --- a/src/wallet/keystore.rs +++ b/src/wallet/keystore.rs 
@@ -6,6 +6,8 @@ /// - Managing wallet files on disk with quantum-resistant security use crate::error::{Result, WalletError}; use qp_rusty_crystals_dilithium::ml_dsa_87::{Keypair, PublicKey, SecretKey}; +#[cfg(test)] +use qp_rusty_crystals_hdwallet::SensitiveBytes32; use serde::{Deserialize, Serialize}; #[cfg(test)] use sp_core::crypto::Ss58AddressFormat; @@ -214,8 +216,8 @@ impl Keystore { // 3. Use password hash as AES-256 key (quantum-safe with 256-bit key) let hash_bytes = password_hash.hash.as_ref().unwrap().as_bytes(); - let aes_key = Key::::from_slice(&hash_bytes[..32]); - let cipher = Aes256Gcm::new(aes_key); + let aes_key = Key::::from(<[u8; 32]>::try_from(&hash_bytes[..32]).unwrap()); + let cipher = Aes256Gcm::new(&aes_key); // 4. Generate nonce and encrypt the wallet data let nonce = Aes256Gcm::generate_nonce(&mut AesOsRng); @@ -255,13 +257,13 @@ impl Keystore { // 2. Derive AES key from verified password hash let hash_bytes = password_hash.hash.as_ref().unwrap().as_bytes(); - let aes_key = Key::::from_slice(&hash_bytes[..32]); - let cipher = Aes256Gcm::new(aes_key); + let aes_key = Key::::from(<[u8; 32]>::try_from(&hash_bytes[..32]).unwrap()); + let cipher = Aes256Gcm::new(&aes_key); // 3. Decrypt the data - let nonce = Nonce::from_slice(&encrypted.aes_nonce); + let nonce = Nonce::from(<[u8; 12]>::try_from(&encrypted.aes_nonce[..]).unwrap()); let decrypted_data = cipher - .decrypt(nonce, encrypted.encrypted_data.as_ref()) + .decrypt(&nonce, encrypted.encrypted_data.as_ref()) .map_err(|_| WalletError::Decryption)?; // 4. 
Deserialize the wallet data @@ -281,8 +283,8 @@ mod tests { #[test] fn test_quantum_keypair_from_dilithium_keypair() { // Generate a test keypair - let entropy = [1u8; 32]; - let dilithium_keypair = Keypair::generate(&entropy); + let mut entropy = [1u8; 32]; + let dilithium_keypair = Keypair::generate(SensitiveBytes32::from(&mut entropy)); // Convert to QuantumKeyPair let quantum_keypair = QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); @@ -295,8 +297,8 @@ mod tests { #[test] fn test_quantum_keypair_to_dilithium_keypair_roundtrip() { // Generate a test keypair - let entropy = [2u8; 32]; - let original_keypair = Keypair::generate(&entropy); + let mut entropy = [2u8; 32]; + let original_keypair = Keypair::generate(SensitiveBytes32::from(&mut entropy)); // Convert to QuantumKeyPair and back let quantum_keypair = QuantumKeyPair::from_dilithium_keypair(&original_keypair); @@ -420,8 +422,8 @@ mod tests { // Start with a Dilithium keypair sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom(189)); - let entropy = [3u8; 32]; - let dilithium_keypair = Keypair::generate(&entropy); + let mut entropy = [3u8; 32]; + let dilithium_keypair = Keypair::generate(SensitiveBytes32::from(&mut entropy)); // Convert through different paths let quantum_from_dilithium = QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); @@ -664,8 +666,8 @@ mod tests { fn test_keypair_data_integrity() { // Generate multiple keypairs and verify they maintain data integrity for i in 0..5 { - let entropy = [i as u8; 32]; - let dilithium_keypair = Keypair::generate(&entropy); + let mut entropy = [i as u8; 32]; + let dilithium_keypair = Keypair::generate(SensitiveBytes32::from(&mut entropy)); let quantum_keypair = QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); // Print actual key sizes for debugging (first iteration only) diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index f92ec96..0837d2d 100644 --- a/src/wallet/mod.rs +++ 
b/src/wallet/mod.rs @@ -10,13 +10,13 @@ pub mod password; use crate::error::{Result, WalletError}; pub use keystore::{Keystore, QuantumKeyPair, WalletData}; -use qp_rusty_crystals_hdwallet::{generate_mnemonic, HDLattice}; +use qp_rusty_crystals_hdwallet::{derive_key_from_mnemonic, generate_mnemonic, SensitiveBytes32}; use rand::{rng, RngCore}; use serde::{Deserialize, Serialize}; use sp_runtime::traits::IdentifyAccount; -/// Default derivation path for Quantus wallets: m/44'/189189'/0'/0/0 -pub const DEFAULT_DERIVATION_PATH: &str = "m/44'/189189'/0'/0/0"; +/// Default derivation path for Quantus wallets: m/44'/189189'/0'/0'/0' +pub const DEFAULT_DERIVATION_PATH: &str = "m/44'/189189'/0'/0'/0'"; /// Wallet information structure #[derive(Debug, Clone, Serialize, Deserialize)] @@ -69,11 +69,9 @@ impl WalletManager { // Generate a new Dilithium keypair using derivation path let mut seed = [0u8; 32]; rng().fill_bytes(&mut seed); - let mnemonic = generate_mnemonic(24, seed).map_err(|_| WalletError::KeyGeneration)?; - let lattice = - HDLattice::from_mnemonic(&mnemonic, None).expect("Failed to generate lattice"); - let dilithium_keypair = lattice - .generate_derived_keys(derivation_path) + let sensitive_seed = SensitiveBytes32::from(&mut seed); + let mnemonic = generate_mnemonic(sensitive_seed).map_err(|_| WalletError::KeyGeneration)?; + let dilithium_keypair = derive_key_from_mnemonic(&mnemonic, None, derivation_path) .map_err(|_| WalletError::KeyGeneration)?; let quantum_keypair = QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); @@ -219,13 +217,14 @@ impl WalletManager { return Err(WalletError::AlreadyExists.into()); } - // Generate new mnemonic and use master seed directly + // Generate new mnemonic and use master seed directly (no derivation path) let mut seed = [0u8; 32]; rng().fill_bytes(&mut seed); - let mnemonic = generate_mnemonic(24, seed).map_err(|_| WalletError::KeyGeneration)?; - let lattice = - HDLattice::from_mnemonic(&mnemonic, None).map_err(|_| 
WalletError::KeyGeneration)?; - let dilithium_keypair = lattice.generate_keys(); + let sensitive_seed = SensitiveBytes32::from(&mut seed); + let mnemonic = generate_mnemonic(sensitive_seed).map_err(|_| WalletError::KeyGeneration)?; + // For "no derivation" mode, we use the root path m/ + let dilithium_keypair = derive_key_from_mnemonic(&mnemonic, None, "m/44'/189189'/0'") + .map_err(|_| WalletError::KeyGeneration)?; let quantum_keypair = QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); // Create wallet data @@ -272,10 +271,9 @@ impl WalletManager { return Err(WalletError::AlreadyExists.into()); } - // Use mnemonic to generate master seed directly - let lattice = - HDLattice::from_mnemonic(mnemonic, None).map_err(|_| WalletError::InvalidMnemonic)?; - let dilithium_keypair = lattice.generate_keys(); + // Use mnemonic to generate keys directly (no derivation path) + let dilithium_keypair = derive_key_from_mnemonic(mnemonic, None, "m/44'/189189'/0'") + .map_err(|_| WalletError::InvalidMnemonic)?; let quantum_keypair = QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); // Create wallet data @@ -325,11 +323,8 @@ impl WalletManager { } // Validate and import from mnemonic using derivation path - let lattice = - HDLattice::from_mnemonic(mnemonic, None).map_err(|_| WalletError::InvalidMnemonic)?; - let dilithium_keypair = lattice - .generate_derived_keys(derivation_path) - .map_err(|_| WalletError::KeyGeneration)?; + let dilithium_keypair = derive_key_from_mnemonic(mnemonic, None, derivation_path) + .map_err(|_| WalletError::InvalidMnemonic)?; let quantum_keypair = QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); // Create wallet data @@ -604,8 +599,10 @@ mod tests { let keystore = keystore::Keystore::new(temp_dir.path()); // Create test wallet data - let entropy = [1u8; 32]; // Use fixed entropy for deterministic tests - let dilithium_keypair = qp_rusty_crystals_dilithium::ml_dsa_87::Keypair::generate(&entropy); + let mut entropy = [1u8; 32]; 
// Use fixed entropy for deterministic tests + let dilithium_keypair = qp_rusty_crystals_dilithium::ml_dsa_87::Keypair::generate( + SensitiveBytes32::from(&mut entropy), + ); let quantum_keypair = keystore::QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); let mut metadata = std::collections::HashMap::new(); @@ -654,8 +651,10 @@ mod tests { #[tokio::test] async fn test_quantum_keypair_address_generation() { // Generate keypair - let entropy = [2u8; 32]; // Use different entropy for variety - let dilithium_keypair = qp_rusty_crystals_dilithium::ml_dsa_87::Keypair::generate(&entropy); + let mut entropy = [2u8; 32]; // Use different entropy for variety + let dilithium_keypair = qp_rusty_crystals_dilithium::ml_dsa_87::Keypair::generate( + SensitiveBytes32::from(&mut entropy), + ); let quantum_keypair = keystore::QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); // Test address generation @@ -678,8 +677,10 @@ mod tests { let keystore = keystore::Keystore::new(temp_dir.path()); // Create and encrypt wallet data - let entropy = [3u8; 32]; // Use different entropy for each test - let dilithium_keypair = qp_rusty_crystals_dilithium::ml_dsa_87::Keypair::generate(&entropy); + let mut entropy = [3u8; 32]; // Use different entropy for each test + let dilithium_keypair = qp_rusty_crystals_dilithium::ml_dsa_87::Keypair::generate( + SensitiveBytes32::from(&mut entropy), + ); let quantum_keypair = keystore::QuantumKeyPair::from_dilithium_keypair(&dilithium_keypair); let wallet_data = keystore::WalletData { @@ -772,8 +773,8 @@ mod tests { let (wallet_manager, _temp_dir) = create_test_wallet_manager().await; let test_mnemonic = "orchard answer curve patient visual flower maze noise retreat penalty cage small earth domain scan pitch bottom crunch theme club client swap slice raven"; - let expected_address = "qznMJss7Ls1SWBhvvL2CSHVbgTxEfnL9GgpvMTq5CWMEwfCoe"; // default derivation path index 0 - let expected_address_no_derive = 
"qznBvupPsA9T8VJDuTDokKPiNUe88zMMUtHGA1AsGc8fXKSSA"; + let expected_address = "qzoog56PJKvDwqo9GwkzRN74kxEgDEspxu5zVA62y18ttt3tG"; // default derivation path index 0 + let expected_address_no_derive = "qzofkFbmnEYLX6iHwqJ9uKYXFi7ypQwcBBMxcYYLVD17vGpsm"; let imported_wallet = wallet_manager .import_wallet("imported-test-wallet", test_mnemonic, Some("import-password")) diff --git a/tests/wormhole_integration.rs b/tests/wormhole_integration.rs index 7a2b873..1353029 100644 --- a/tests/wormhole_integration.rs +++ b/tests/wormhole_integration.rs @@ -16,17 +16,16 @@ use plonky2::plonk::{circuit_data::CircuitConfig, proof::ProofWithPublicInputs}; use qp_wormhole_circuit::{ - inputs::{ - AggregatedPublicCircuitInputs, CircuitInputs, PrivateCircuitInputs, PublicCircuitInputs, - }, + inputs::{CircuitInputs, PrivateCircuitInputs}, nullifier::Nullifier, }; +use qp_wormhole_inputs::{AggregatedPublicCircuitInputs, PublicCircuitInputs}; use qp_wormhole_prover::WormholeProver; -use qp_wormhole_verifier::WormholeVerifier; + use qp_zk_circuits_common::{ circuit::{C, D, F}, storage_proof::prepare_proof_for_circuit, - utils::{BytesDigest, Digest}, + utils::{digest_felts_to_bytes, BytesDigest}, }; use quantus_cli::{ chain::{ @@ -249,8 +248,9 @@ async fn submit_wormhole_transfer( println!(" Unspendable account: 0x{}", hex::encode(unspendable_account_bytes)); println!(" Exit account: 0x{}", hex::encode(exit_account_bytes)); - // Create and submit transfer to unspendable account - let transfer_tx = quantus_node::api::tx().wormhole().transfer_native( + // Fund via Balances (wormhole has no transfer_native; WormholeProofRecorderExtension + // records every Balances transfer and emits NativeTransferred) + let transfer_tx = quantus_node::api::tx().balances().transfer_allow_death( subxt::ext::subxt_core::utils::MultiAddress::Id(unspendable_account_id.clone()), funding_amount, ); @@ -263,13 +263,14 @@ async fn submit_wormhole_transfer( }; // Submit transaction and get the actual block hash 
where it was included - let block_hash = submit_and_get_block_hash(quantus_client, &quantum_keypair, transfer_tx) - .await - .map_err(|e| format!("Transfer failed: {}", e))?; + let block_hash: subxt::utils::H256 = + submit_and_get_block_hash(quantus_client, &quantum_keypair, transfer_tx) + .await + .map_err(|e| format!("Transfer failed: {}", e))?; println!(" Transfer included in block: {:?}", block_hash); - // Find the NativeTransferred event that matches our unspendable account + // WormholeProofRecorderExtension emits NativeTransferred for every Balances transfer let events_api = client .events() .at(block_hash) @@ -410,22 +411,25 @@ async fn generate_proof_from_transfer( funding_account: BytesDigest::try_from(transfer_data.funding_account.as_ref() as &[u8]) .map_err(|e| format!("Failed to convert funding account: {}", e))?, storage_proof: processed_storage_proof, - unspendable_account: Digest::from(transfer_data.unspendable_account).into(), + unspendable_account: digest_felts_to_bytes(transfer_data.unspendable_account), + parent_hash, state_root, extrinsics_root, digest, input_amount: input_amount_quantized, }, public: PublicCircuitInputs { - output_amount: output_amount_quantized, + output_amount_1: output_amount_quantized, + output_amount_2: 0, // No change output for single-output spend volume_fee_bps: VOLUME_FEE_BPS, - nullifier: Nullifier::from_preimage(secret_digest, transfer_data.transfer_count) - .hash - .into(), - exit_account: exit_account_digest, + nullifier: digest_felts_to_bytes( + Nullifier::from_preimage(secret_digest, transfer_data.transfer_count).hash, + ), + exit_account_1: exit_account_digest, + exit_account_2: BytesDigest::try_from([0u8; 32].as_ref()) + .map_err(|e| format!("Failed to convert zero exit account: {}", e))?, block_hash: BytesDigest::try_from(block_hash.as_ref()) .map_err(|e| format!("Failed to convert block hash: {}", e))?, - parent_hash, block_number, asset_id: NATIVE_ASSET_ID, }, @@ -438,7 +442,8 @@ async fn 
generate_proof_from_transfer( let proof: ProofWithPublicInputs<_, _, 2> = prover_next.prove().map_err(|e| format!("Proof generation failed: {}", e))?; - let public_inputs = PublicCircuitInputs::try_from(&proof) + use qp_wormhole_circuit::inputs::ParsePublicInputs; + let public_inputs = PublicCircuitInputs::try_from_proof(&proof) .map_err(|e| format!("Failed to parse public inputs: {}", e))?; let proof_bytes = proof.to_bytes(); @@ -454,7 +459,7 @@ async fn submit_single_proof_for_verification( ) -> Result<(), String> { println!(" Submitting single proof for on-chain verification..."); - let verify_tx = quantus_node::api::tx().wormhole().verify_wormhole_proof(proof_bytes); + let verify_tx = quantus_node::api::tx().wormhole().verify_aggregated_proof(proof_bytes); let unsigned_tx = quantus_client .client() @@ -495,48 +500,42 @@ async fn submit_single_proof_for_verification( /// Aggregate multiple proofs into one fn aggregate_proofs( proof_contexts: Vec, - depth: usize, - branching_factor: usize, + num_leaf_proofs: usize, ) -> Result { - use qp_wormhole_aggregator::{ - aggregator::WormholeProofAggregator, circuits::tree::TreeAggregationConfig, - }; + use qp_wormhole_aggregator::aggregator::WormholeProofAggregator; + use qp_zk_circuits_common::aggregation::AggregationConfig; println!( - " Aggregating {} proofs (depth={}, branching_factor={})...", + " Aggregating {} proofs (num_leaf_proofs={})...", proof_contexts.len(), - depth, - branching_factor + num_leaf_proofs, ); let config = CircuitConfig::standard_recursion_zk_config(); - let verifier = WormholeVerifier::new(config.clone(), None); - - let aggregation_config = TreeAggregationConfig::new(branching_factor, depth as u32); + let aggregation_config = AggregationConfig::new(num_leaf_proofs); if proof_contexts.len() > aggregation_config.num_leaf_proofs { return Err(format!( - "Too many proofs: {} provided, max {} for depth={} branching_factor={}", + "Too many proofs: {} provided, max {}", proof_contexts.len(), 
aggregation_config.num_leaf_proofs, - depth, - branching_factor )); } - let mut aggregator = - WormholeProofAggregator::new(verifier.circuit_data).with_config(aggregation_config); + let mut aggregator = WormholeProofAggregator::from_circuit_config(config, aggregation_config); for (idx, ctx) in proof_contexts.into_iter().enumerate() { println!(" Adding proof {} to aggregator...", idx + 1); println!(" Public inputs:"); println!(" asset_id: {}", ctx.public_inputs.asset_id); - println!(" output_amount: {}", ctx.public_inputs.output_amount); + println!(" output_amount_1: {}", ctx.public_inputs.output_amount_1); + println!(" output_amount_2: {}", ctx.public_inputs.output_amount_2); println!(" volume_fee_bps: {}", ctx.public_inputs.volume_fee_bps); println!(" nullifier: {:?}", ctx.public_inputs.nullifier); - println!(" exit_account: {:?}", ctx.public_inputs.exit_account); + println!(" exit_account_1: {:?}", ctx.public_inputs.exit_account_1); + println!(" exit_account_2: {:?}", ctx.public_inputs.exit_account_2); println!(" block_hash: {:?}", ctx.public_inputs.block_hash); - println!(" parent_hash: {:?}", ctx.public_inputs.parent_hash); + println!(" block_number: {:?}", ctx.public_inputs.block_number); println!(" block_number: {}", ctx.public_inputs.block_number); aggregator .push_proof(ctx.proof) @@ -547,7 +546,8 @@ fn aggregate_proofs( let aggregated_result = aggregator.aggregate().map_err(|e| format!("Aggregation failed: {}", e))?; - let public_inputs = AggregatedPublicCircuitInputs::try_from_slice( + use qp_wormhole_circuit::inputs::ParseAggregatedPublicInputs; + let public_inputs = AggregatedPublicCircuitInputs::try_from_felts( aggregated_result.proof.public_inputs.as_slice(), ) .map_err(|e| format!("Failed to parse aggregated public inputs: {}", e))?; @@ -696,8 +696,8 @@ async fn test_single_proof_on_chain_verification() { .expect("Failed to generate proof"); println!( - " Public inputs - output_amount: {}, nullifier: {:?}", - 
proof_context.public_inputs.output_amount, proof_context.public_inputs.nullifier + " Public inputs - output_amount_1: {}, nullifier: {:?}", + proof_context.public_inputs.output_amount_1, proof_context.public_inputs.nullifier ); // Submit for on-chain verification @@ -723,7 +723,7 @@ async fn test_single_proof_on_chain_verification() { // Compute expected miner fee from public inputs (single proof) let fee_bps = proof_context.public_inputs.volume_fee_bps; - let exit_u128 = (proof_context.public_inputs.output_amount as u128) * SCALE_DOWN_FACTOR; + let exit_u128 = (proof_context.public_inputs.output_amount_1 as u128) * SCALE_DOWN_FACTOR; let expected_miner_fee = expected_miner_fee_u128(exit_u128, fee_bps); assert!(expected_miner_fee > 0, "expected miner fee should be > 0"); @@ -828,8 +828,8 @@ async fn test_aggregated_proof_on_chain_verification() { .expect("Failed to generate proof"); println!( - " Public inputs - output_amount: {}, nullifier: {:?}", - proof_context.public_inputs.output_amount, proof_context.public_inputs.nullifier + " Public inputs - output_amount_1: {}, nullifier: {:?}", + proof_context.public_inputs.output_amount_1, proof_context.public_inputs.nullifier ); proof_contexts.push(proof_context); @@ -839,8 +839,7 @@ async fn test_aggregated_proof_on_chain_verification() { println!("\n4. 
Aggregating {} proofs...", proof_contexts.len()); let aggregated_context = aggregate_proofs( proof_contexts, - 1, // depth - 2, // branching_factor (2^1 = 2 max proofs) + 2, // num_leaf_proofs ) .expect("Failed to aggregate proofs"); @@ -956,7 +955,7 @@ async fn test_single_proof_exit_account_verification() { // Verify the exit account in public inputs matches what we specified let exit_account_from_proof: [u8; 32] = proof_context .public_inputs - .exit_account + .exit_account_1 .as_ref() .try_into() .expect("Exit account should be 32 bytes"); @@ -1137,7 +1136,7 @@ async fn test_full_wormhole_workflow() { // Step 3: Aggregate and verify println!("\n--- Step 3: Aggregation ---"); let aggregated = - aggregate_proofs(proofs_for_aggregation, 1, 2).expect("Failed to aggregate proofs"); + aggregate_proofs(proofs_for_aggregation, 2).expect("Failed to aggregate proofs"); println!(" Verifying aggregated proof on-chain..."); submit_aggregated_proof_for_verification(&quantus_client, aggregated.proof_bytes)