diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6db02669e3c..16c6c5b4f08 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -264,7 +264,11 @@ jobs: - name: Pin the syn and regex dependencies run: | cd fuzz && cargo update -p regex --precise "1.9.6" && cargo update -p syn --precise "2.0.106" && cargo update -p quote --precise "1.0.41" + cargo update -p proc-macro2 --precise "1.0.103" --verbose && cargo update -p serde_json --precise "1.0.145" --verbose + cargo update -p itoa --precise "1.0.15" --verbose && cargo update -p ryu --precise "1.0.20" --verbose cd write-seeds && cargo update -p regex --precise "1.9.6" && cargo update -p syn --precise "2.0.106" && cargo update -p quote --precise "1.0.41" + cargo update -p proc-macro2 --precise "1.0.103" --verbose && cargo update -p serde_json --precise "1.0.145" --verbose + cargo update -p itoa --precise "1.0.15" --verbose && cargo update -p ryu --precise "1.0.20" --verbose - name: Sanity check fuzz targets on Rust ${{ env.TOOLCHAIN }} run: | cd fuzz diff --git a/CHANGELOG.md b/CHANGELOG.md index a51c5fda8bd..b8d85210db9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +# 0.2.1 - Jan 26, 2026 - "Electrum Confirmations Logged" + +## API Updates + * The `AttributionData` struct is now public, correcting an issue where it was + accidentally sealed, preventing construction of some messages (#4268). + * The async background processor now exits as soon as the sleeper + returns the exit flag, even if work remains to be done (#4259). + +## Bug Fixes + * The presence of unconfirmed transactions no longer causes + `ElectrumSyncClient` to spuriously fail to sync (#4341). + * `ChannelManager::splice_channel` now properly fails immediately if the + peer does not support splicing (#4262, #4274). + * A spurious debug assertion was removed which could fail in cases where an + HTLC fails to be forwarded after being accepted (#4312). + * Many log calls related to outbound payments were corrected to include a + `payment_hash` field (#4342).
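The background-processor note above corresponds to the `Selector` reordering in `lightning-background-processor/src/lib.rs` later in this diff: the sleeper future moves from slot `e` to slot `a` so it is polled first and carries the `bool` exit flag. A minimal sketch of that biased-polling idea, using simplified two-slot stand-in types rather than LDK's actual five-slot `Selector`:

```rust
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};

enum BiasedOutput {
    Exit(bool), // the sleeper resolved, carrying the exit flag
    Work,       // a work future resolved
}

struct BiasedSelector<A: Future<Output = bool> + Unpin, B: Future<Output = ()> + Unpin> {
    sleeper: A,
    work: B,
}

impl<A: Future<Output = bool> + Unpin, B: Future<Output = ()> + Unpin> Future
    for BiasedSelector<A, B>
{
    type Output = BiasedOutput;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Bias: always poll the sleeper first so a pending exit flag is observed
        // immediately rather than after another round of work.
        if let Poll::Ready(exit) = Pin::new(&mut self.sleeper).poll(cx) {
            return Poll::Ready(BiasedOutput::Exit(exit));
        }
        if let Poll::Ready(()) = Pin::new(&mut self.work).poll(cx) {
            return Poll::Ready(BiasedOutput::Work);
        }
        Poll::Pending
    }
}
```

In the event loop below this makes `SelectorOutput::A(exit)` the first case checked, so a set exit flag breaks out of the loop before any further work is scheduled.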
+ + # 0.2 - Dec 2, 2025 - "Natively Asynchronous Splicing" ## API Updates diff --git a/ci/check-lint.sh b/ci/check-lint.sh index bd4df3b85b8..11c6f083dd0 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -13,6 +13,7 @@ CLIPPY() { -A clippy::unwrap-or-default \ -A clippy::upper_case_acronyms \ -A clippy::swap-with-temporary \ + -A clippy::assertions-on-constants \ `# Things where we do odd stuff on purpose ` \ -A clippy::unusual_byte_groupings \ -A clippy::unit_arg \ diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index f3ecc72806a..eb7dfd58826 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -17,6 +17,18 @@ function PIN_RELEASE_DEPS { # quote 1.0.42 requires rustc 1.68.0 [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose + # Starting with version 1.0.104, the `proc-macro2` crate has an MSRV of rustc 1.68 + [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p proc-macro2 --precise "1.0.103" --verbose + + # Starting with version 1.0.146, the `serde_json` crate has an MSRV of rustc 1.68 + [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p serde_json --precise "1.0.145" --verbose + + # Starting with version 1.0.16, the `itoa` crate has an MSRV of rustc 1.68 + [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p itoa --precise "1.0.15" --verbose + + # Starting with version 1.0.21, the `ryu` crate has an MSRV of rustc 1.68 + [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p ryu --precise "1.0.20" --verbose + return 0 # Don't fail the script if our rustc is higher than the last check } @@ -58,6 +70,7 @@ pushd lightning-tests [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p proc-macro2 --precise "1.0.103" --verbose cargo test popd @@ -130,6 +143,10 @@ echo -e "\n\nTesting no_std build on a downstream no-std crate" pushd no-std-check [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p syn --precise "2.0.106" --verbose [ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p quote --precise "1.0.41" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p proc-macro2 --precise "1.0.103" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p serde_json --precise "1.0.145" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p itoa --precise "1.0.15" --verbose +[ "$RUSTC_MINOR_VERSION" -lt 68 ] && cargo update -p ryu --precise "1.0.20" --verbose cargo check --verbose --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd diff --git a/fuzz/src/process_onion_failure.rs b/fuzz/src/process_onion_failure.rs index 1bc9900718a..ac70562c006 100644 --- a/fuzz/src/process_onion_failure.rs +++ b/fuzz/src/process_onion_failure.rs @@ -9,10 +9,12 @@ use lightning::{ ln::{ channelmanager::{HTLCSource, PaymentId}, msgs::OnionErrorPacket, + onion_utils, }, routing::router::{BlindedTail, Path, RouteHop, TrampolineHop}, types::features::{ChannelFeatures, NodeFeatures}, util::logger::Logger, + util::ser::Readable, }; // Imports that need to be added manually @@ -126,19 +128,18 @@ fn do_test(data: &[u8], out: Out) { let failure_data = get_slice!(failure_len); let attribution_data = if get_bool!() { - Some(lightning::ln::AttributionData { - hold_times: get_slice!(80).try_into().unwrap(), - hmacs: get_slice!(840).try_into().unwrap(), - }) + let mut bytes = get_slice!(80 + 840); 
+ let data: onion_utils::AttributionData = Readable::read(&mut bytes).unwrap(); + Some(data) } else { None }; let encrypted_packet = OnionErrorPacket { data: failure_data.into(), attribution_data: attribution_data.clone() }; - lightning::ln::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); + onion_utils::process_onion_failure(&secp_ctx, &logger, &htlc_source, encrypted_packet); if let Some(attribution_data) = attribution_data { - lightning::ln::decode_fulfill_attribution_data( + onion_utils::decode_fulfill_attribution_data( &secp_ctx, &logger, &path, diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 19333c5823a..31e519b9f57 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -476,11 +476,11 @@ pub(crate) mod futures_util { use core::pin::Pin; use core::task::{Poll, RawWaker, RawWakerVTable, Waker}; pub(crate) struct Selector< - A: Future + Unpin, + A: Future + Unpin, B: Future + Unpin, C: Future + Unpin, D: Future + Unpin, - E: Future + Unpin, + E: Future + Unpin, > { pub a: A, pub b: B, @@ -490,28 +490,30 @@ pub(crate) mod futures_util { } pub(crate) enum SelectorOutput { - A, + A(bool), B, C, D, - E(bool), + E, } impl< - A: Future + Unpin, + A: Future + Unpin, B: Future + Unpin, C: Future + Unpin, D: Future + Unpin, - E: Future + Unpin, + E: Future + Unpin, > Future for Selector { type Output = SelectorOutput; fn poll( mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>, ) -> Poll { + // Bias the selector so it first polls the sleeper future, allowing to exit immediately + // if the flag is set. match Pin::new(&mut self.a).poll(ctx) { - Poll::Ready(()) => { - return Poll::Ready(SelectorOutput::A); + Poll::Ready(res) => { + return Poll::Ready(SelectorOutput::A(res)); }, Poll::Pending => {}, } @@ -534,8 +536,8 @@ pub(crate) mod futures_util { Poll::Pending => {}, } match Pin::new(&mut self.e).poll(ctx) { - Poll::Ready(res) => { - return Poll::Ready(SelectorOutput::E(res)); + Poll::Ready(()) => { + return Poll::Ready(SelectorOutput::E); }, Poll::Pending => {}, } @@ -1037,15 +1039,15 @@ where (false, false) => FASTEST_TIMER, }; let fut = Selector { - a: channel_manager.get_cm().get_event_or_persistence_needed_future(), - b: chain_monitor.get_update_future(), - c: om_fut, - d: lm_fut, - e: sleeper(sleep_delay), + a: sleeper(sleep_delay), + b: channel_manager.get_cm().get_event_or_persistence_needed_future(), + c: chain_monitor.get_update_future(), + d: om_fut, + e: lm_fut, }; match fut.await { - SelectorOutput::A | SelectorOutput::B | SelectorOutput::C | SelectorOutput::D => {}, - SelectorOutput::E(exit) => { + SelectorOutput::B | SelectorOutput::C | SelectorOutput::D | SelectorOutput::E => {}, + SelectorOutput::A(exit) => { if exit { break; } diff --git a/lightning-macros/Cargo.toml b/lightning-macros/Cargo.toml index 546c4de5129..a7a5eabf589 100644 --- a/lightning-macros/Cargo.toml +++ b/lightning-macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-macros" -version = "0.2.0" +version = "0.2.1" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning-macros/src/lib.rs b/lightning-macros/src/lib.rs index e784acf72fb..e1b5f278d79 100644 --- a/lightning-macros/src/lib.rs +++ b/lightning-macros/src/lib.rs @@ -11,22 +11,20 @@ //! 
Proc macros used by LDK -#![cfg_attr(not(test), no_std)] #![deny(missing_docs)] #![forbid(unsafe_code)] #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] #![cfg_attr(docsrs, feature(doc_cfg))] -extern crate alloc; +use std::string::ToString; -use alloc::string::ToString; use proc_macro::{Delimiter, Group, TokenStream, TokenTree}; use proc_macro2::TokenStream as TokenStream2; use quote::quote; +use syn::parse::Parser; use syn::spanned::Spanned; -use syn::{parse, ImplItemFn, Token}; -use syn::{parse_macro_input, Item}; +use syn::{parse, parse_macro_input, ImplItemFn, Item, Token}; fn add_async_method(mut parsed: ImplItemFn) -> TokenStream { let output = quote! { @@ -400,3 +398,357 @@ pub fn xtest_inventory(_input: TokenStream) -> TokenStream { TokenStream::from(expanded) } + +fn add_logs_to_macro_call(m: &mut syn::Macro) { + // Many macros just take a bunch of exprs as arguments, separated by commas. + // In that case, we optimistically edit the args as if they're exprs. + let parser = syn::punctuated::Punctuated::::parse_terminated; + if let Ok(mut parsed) = parser.parse(m.tokens.clone().into()) { + for expr in parsed.iter_mut() { + add_logs_to_self_exprs(expr); + } + m.tokens = quote! { #parsed }.into(); + } +} + +fn add_logs_to_stmt_list(s: &mut Vec) { + for stmt in s.iter_mut() { + match stmt { + syn::Stmt::Expr(ref mut expr, _) => add_logs_to_self_exprs(expr), + syn::Stmt::Local(syn::Local { init, .. }) => { + if let Some(l) = init { + add_logs_to_self_exprs(&mut *l.expr); + if let Some((_, e)) = &mut l.diverge { + add_logs_to_self_exprs(&mut *e); + } + } + }, + syn::Stmt::Macro(m) => add_logs_to_macro_call(&mut m.mac), + syn::Stmt::Item(syn::Item::Fn(f)) => { + add_logs_to_stmt_list(&mut f.block.stmts); + }, + syn::Stmt::Item(_) => {}, + } + } +} + +fn add_logs_to_self_exprs(e: &mut syn::Expr) { + match e { + syn::Expr::Array(e) => { + for elem in e.elems.iter_mut() { + add_logs_to_self_exprs(elem); + } + }, + syn::Expr::Assign(e) => { + add_logs_to_self_exprs(&mut *e.left); + add_logs_to_self_exprs(&mut *e.right); + }, + syn::Expr::Async(e) => { + add_logs_to_stmt_list(&mut e.block.stmts); + }, + syn::Expr::Await(e) => { + add_logs_to_self_exprs(&mut *e.base); + }, + syn::Expr::Binary(e) => { + add_logs_to_self_exprs(&mut *e.left); + add_logs_to_self_exprs(&mut *e.right); + }, + syn::Expr::Block(e) => { + add_logs_to_stmt_list(&mut e.block.stmts); + }, + syn::Expr::Break(e) => { + if let Some(e) = e.expr.as_mut() { + add_logs_to_self_exprs(&mut *e); + } + }, + syn::Expr::Call(e) => { + let mut needs_log_param = false; + if let syn::Expr::Path(p) = &*e.func { + if p.path.segments.len() == 2 && p.path.segments[0].ident == "Self" { + needs_log_param = true; + } + } else { + add_logs_to_self_exprs(&mut *e.func); + } + for a in e.args.iter_mut() { + add_logs_to_self_exprs(a); + } + if needs_log_param { + e.args.push(parse(quote!(logger).into()).unwrap()); + } + }, + syn::Expr::Cast(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::Closure(e) => { + add_logs_to_self_exprs(&mut *e.body); + }, + syn::Expr::Const(_) => {}, + syn::Expr::Continue(_) => {}, + syn::Expr::Field(e) => { + add_logs_to_self_exprs(&mut *e.base); + }, + syn::Expr::ForLoop(e) => { + add_logs_to_self_exprs(&mut *e.expr); + add_logs_to_stmt_list(&mut e.body.stmts); + }, + syn::Expr::Group(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::If(e) => { + add_logs_to_self_exprs(&mut *e.cond); + add_logs_to_stmt_list(&mut e.then_branch.stmts); + if let Some((_, 
branch)) = e.else_branch.as_mut() { + add_logs_to_self_exprs(&mut *branch); + } + }, + syn::Expr::Index(e) => { + add_logs_to_self_exprs(&mut *e.expr); + add_logs_to_self_exprs(&mut *e.index); + }, + syn::Expr::Infer(_) => {}, + syn::Expr::Let(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::Lit(_) => {}, + syn::Expr::Loop(e) => { + add_logs_to_stmt_list(&mut e.body.stmts); + }, + syn::Expr::Macro(e) => { + add_logs_to_macro_call(&mut e.mac); + }, + syn::Expr::Match(e) => { + add_logs_to_self_exprs(&mut *e.expr); + for arm in e.arms.iter_mut() { + if let Some((_, e)) = arm.guard.as_mut() { + add_logs_to_self_exprs(&mut *e); + } + add_logs_to_self_exprs(&mut *arm.body); + } + }, + syn::Expr::MethodCall(e) => { + match &*e.receiver { + syn::Expr::Path(path) => { + assert_eq!( + path.path.segments.len(), + 1, + "Multiple segments should instead be parsed as a Field, below" + ); + let is_self_call = path.qself.is_none() + && path.path.segments.len() == 1 + && path.path.segments[0].ident == "self"; + if is_self_call { + e.args.push(parse(quote!(logger).into()).unwrap()); + } + }, + _ => add_logs_to_self_exprs(&mut *e.receiver), + } + for a in e.args.iter_mut() { + add_logs_to_self_exprs(a); + } + }, + syn::Expr::Paren(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::Path(_) => {}, + syn::Expr::Range(e) => { + if let Some(start) = e.start.as_mut() { + add_logs_to_self_exprs(start); + } + if let Some(end) = e.end.as_mut() { + add_logs_to_self_exprs(end); + } + }, + syn::Expr::RawAddr(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::Reference(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::Repeat(e) => { + add_logs_to_self_exprs(&mut *e.expr); + add_logs_to_self_exprs(&mut *e.len); + }, + syn::Expr::Return(e) => { + if let Some(e) = e.expr.as_mut() { + add_logs_to_self_exprs(&mut *e); + } + }, + syn::Expr::Struct(e) => { + for field in e.fields.iter_mut() { + add_logs_to_self_exprs(&mut field.expr); + } + if let Some(rest) = e.rest.as_mut() { + add_logs_to_self_exprs(rest); + } + }, + syn::Expr::Try(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::TryBlock(e) => { + add_logs_to_stmt_list(&mut e.block.stmts); + }, + syn::Expr::Tuple(e) => { + for elem in e.elems.iter_mut() { + add_logs_to_self_exprs(elem); + } + }, + syn::Expr::Unary(e) => { + add_logs_to_self_exprs(&mut *e.expr); + }, + syn::Expr::Unsafe(e) => { + add_logs_to_stmt_list(&mut e.block.stmts); + }, + syn::Expr::Verbatim(_) => { + panic!("Need to update syn to parse some new syntax"); + }, + syn::Expr::While(e) => { + add_logs_to_self_exprs(&mut *e.cond); + add_logs_to_stmt_list(&mut e.body.stmts); + }, + syn::Expr::Yield(e) => { + if let Some(e) = e.expr.as_mut() { + add_logs_to_self_exprs(&mut *e); + } + }, + _ => {}, + } +} + +/// This attribute, on an `impl` block, will add logging parameters transparently to every method +/// in the `impl` block. It will also pass through the current logger to any calls to method calls +/// of the form `self.*()`. +/// +/// Provided attributes should be in the form `logger: LoggerType` where `LoggerType` is the type +/// of the logger object which is required. 
+/// +/// For example, this translates: +/// ```rust +/// use std::ops::Deref; +/// +/// struct B; +/// struct A { field_b: B } +/// +/// trait Logger {} +/// +/// struct LogType(L); +/// impl LogType { +/// fn log(&self) {} +/// } +/// +/// #[lightning_macros::add_logging(LogType)] +/// impl A { +/// fn f_a(&self) { +/// logger.log(); +/// } +/// fn f(&self) { +/// self.f_a(); +/// self.field_b.f(logger); // Note you'll have to pass the logger manually here +/// } +/// } +/// +/// #[lightning_macros::add_logging(LogType)] +/// impl B { +/// fn f(&self) { +/// logger.log(); +/// } +/// } +/// ``` +/// +/// to this: +/// +/// ```rust +/// use std::ops::Deref; +/// +/// struct B; +/// struct A { field_b: B } +/// +/// trait Logger {} +/// +/// struct LogType(L); +/// impl LogType { +/// fn log(&self) {} +/// } +/// +/// impl A { +/// fn f_a(&self, logger: &LogType) where L::Target: Logger { +/// logger.log(); +/// } +/// fn f(&self, logger: &LogType) where L::Target: Logger { +/// self.f_a(logger); +/// self.field_b.f(logger); // Note you'll have to pass the logger manually here +/// } +/// } +/// +/// impl B { +/// fn f(&self, logger: &LogType) where L::Target: Logger { +/// logger.log(); +/// } +/// } +/// ``` +#[proc_macro_attribute] +pub fn add_logging(attrs: TokenStream, expr: TokenStream) -> TokenStream { + let mut im = if let Ok(parsed) = parse::(expr) { + if let syn::Item::Impl(im) = parsed { + im + } else { + return (quote! { + compile_error!("add_logging can only be used on impl items") + }) + .into(); + } + } else { + return (quote! { + compile_error!("add_logging can only be used on impl items") + }) + .into(); + }; + + let logger_type = if let Ok(ty) = parse::(attrs) { + ty + } else { + return (quote! { + compile_error!("add_logging's attributes must be in the form `LoggerType`") + }) + .into(); + }; + + for item in im.items.iter_mut() { + if let syn::ImplItem::Fn(f) = item { + // In some rare cases, manually taking a logger may be required to specify appropriate + // lifetimes. Detect such cases and avoid adding a redundant logger. + let have_logger = f.sig.inputs.iter().any(|inp| { + if let syn::FnArg::Typed(syn::PatType { pat, .. }) = inp { + if let syn::Pat::Ident(syn::PatIdent { ident, .. }) = &**pat { + ident == "logger" + } else { + false + } + } else { + false + } + }); + if !have_logger { + if f.sig.generics.lt_token.is_none() { + f.sig.generics.lt_token = Some(Default::default()); + f.sig.generics.gt_token = Some(Default::default()); + } + f.sig.generics.params.push(parse(quote!(L: Deref).into()).unwrap()); + if f.sig.generics.where_clause.is_none() { + f.sig.generics.where_clause = Some(parse(quote!(where).into()).unwrap()); + } + let log_bound = parse(quote!(L::Target: Logger).into()).unwrap(); + f.sig.generics.where_clause.as_mut().unwrap().predicates.push(log_bound); + f.sig.inputs.push(parse(quote!(logger: &#logger_type).into()).unwrap()); + } + } + } + + for item in im.items.iter_mut() { + if let syn::ImplItem::Fn(f) = item { + add_logs_to_stmt_list(&mut f.block.stmts); + } + } + + quote! 
{ #im }.into() +} diff --git a/lightning-transaction-sync/Cargo.toml b/lightning-transaction-sync/Cargo.toml index 2e6a99810ff..30908cabdb7 100644 --- a/lightning-transaction-sync/Cargo.toml +++ b/lightning-transaction-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-transaction-sync" -version = "0.2.0" +version = "0.2.1" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" diff --git a/lightning-transaction-sync/src/electrum.rs b/lightning-transaction-sync/src/electrum.rs index 47489df69bb..1162b9c00c9 100644 --- a/lightning-transaction-sync/src/electrum.rs +++ b/lightning-transaction-sync/src/electrum.rs @@ -336,10 +336,21 @@ where script_history.iter().filter(|h| h.tx_hash == **txid); if let Some(history) = filtered_history.next() { let prob_conf_height = history.height as u32; + if prob_conf_height <= 0 { + // Skip if it's a an unconfirmed entry. + continue; + } let confirmed_tx = self.get_confirmed_tx(tx, prob_conf_height)?; confirmed_txs.push(confirmed_tx); } - debug_assert!(filtered_history.next().is_none()); + if filtered_history.next().is_some() { + log_error!( + self.logger, + "Failed due to server returning multiple history entries for Tx {}.", + txid + ); + return Err(InternalError::Failed); + } } for (watched_output, script_history) in @@ -347,6 +358,7 @@ where { for possible_output_spend in script_history { if possible_output_spend.height <= 0 { + // Skip if it's a an unconfirmed entry. continue; } diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 7aa869a18bb..55e4b40144f 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.2.0" +version = "0.2.1" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -35,7 +35,7 @@ default = ["std", "grind_signatures"] [dependencies] lightning-types = { version = "0.3.0", path = "../lightning-types", default-features = false } lightning-invoice = { version = "0.34.0", path = "../lightning-invoice", default-features = false } -lightning-macros = { version = "0.2.0", path = "../lightning-macros" } +lightning-macros = { version = "0.2.1", path = "../lightning-macros" } bech32 = { version = "0.11.0", default-features = false } bitcoin = { version = "0.32.2", default-features = false, features = ["secp-recovery"] } @@ -53,7 +53,7 @@ inventory = { version = "0.3", optional = true } [dev-dependencies] regex = "1.5.6" lightning-types = { version = "0.3.0", path = "../lightning-types", features = ["_test_utils"] } -lightning-macros = { version = "0.2.0", path = "../lightning-macros" } +lightning-macros = { version = "0.2.1", path = "../lightning-macros" } parking_lot = { version = "0.12", default-features = false } [dev-dependencies.bitcoin] diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index ab695ee3530..e52b05036ec 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -66,7 +66,7 @@ use crate::sign::{ use crate::types::features::ChannelTypeFeatures; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::byte_utils; -use crate::util::logger::{Logger, Record}; +use crate::util::logger::{Logger, WithContext}; use crate::util::persist::MonitorName; use crate::util::ser::{ MaybeReadable, Readable, ReadableArgs, RequiredWrapper, UpgradableRequired, Writeable, Writer, @@ -1825,45 +1825,27 @@ macro_rules! 
_process_events_body { } pub(super) use _process_events_body as process_events_body; -pub(crate) struct WithChannelMonitor<'a, L: Deref> -where - L::Target: Logger, -{ - logger: &'a L, - peer_id: Option, - channel_id: Option, - payment_hash: Option, -} +pub(crate) struct WithChannelMonitor; -impl<'a, L: Deref> Logger for WithChannelMonitor<'a, L> -where - L::Target: Logger, -{ - fn log(&self, mut record: Record) { - record.peer_id = self.peer_id; - record.channel_id = self.channel_id; - record.payment_hash = self.payment_hash; - self.logger.log(record) - } -} - -impl<'a, L: Deref> WithChannelMonitor<'a, L> -where - L::Target: Logger, -{ - pub(crate) fn from( +impl WithChannelMonitor { + pub(crate) fn from<'a, L: Deref, S: EcdsaChannelSigner>( logger: &'a L, monitor: &ChannelMonitor, payment_hash: Option, - ) -> Self { + ) -> WithContext<'a, L> + where + L::Target: Logger, + { Self::from_impl(logger, &*monitor.inner.lock().unwrap(), payment_hash) } - #[rustfmt::skip] - pub(crate) fn from_impl(logger: &'a L, monitor_impl: &ChannelMonitorImpl, payment_hash: Option) -> Self { + pub(crate) fn from_impl<'a, L: Deref, S: EcdsaChannelSigner>( + logger: &'a L, monitor_impl: &ChannelMonitorImpl, payment_hash: Option, + ) -> WithContext<'a, L> + where + L::Target: Logger, + { let peer_id = Some(monitor_impl.counterparty_node_id); let channel_id = Some(monitor_impl.channel_id()); - WithChannelMonitor { - logger, peer_id, channel_id, payment_hash, - } + WithContext::from(logger, peer_id, channel_id, payment_hash) } } @@ -3829,7 +3811,7 @@ impl ChannelMonitorImpl { fn provide_payment_preimage( &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, payment_info: &Option, broadcaster: &B, - fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor) + fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -4006,7 +3988,7 @@ impl ChannelMonitorImpl { /// /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]: crate::chain::channelmonitor::ChannelMonitor::broadcast_latest_holder_commitment_txn pub(crate) fn queue_latest_holder_commitment_txn_for_broadcast( - &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithChannelMonitor, + &mut self, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, require_funding_seen: bool, ) where @@ -4034,8 +4016,7 @@ impl ChannelMonitorImpl { } fn renegotiated_funding( - &mut self, logger: &WithChannelMonitor, - channel_parameters: &ChannelTransactionParameters, + &mut self, logger: &WithContext, channel_parameters: &ChannelTransactionParameters, alternative_holder_commitment_tx: &HolderCommitmentTransaction, alternative_counterparty_commitment_tx: &CommitmentTransaction, ) -> Result<(), ()> @@ -4210,7 +4191,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn update_monitor( - &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithChannelMonitor + &mut self, updates: &ChannelMonitorUpdate, broadcaster: &B, fee_estimator: &F, logger: &WithContext ) -> Result<(), ()> where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -5254,7 +5235,7 @@ impl ChannelMonitorImpl { /// Note that this includes possibly-locktimed-in-the-future transactions! 
#[rustfmt::skip] fn unsafe_get_latest_holder_commitment_txn( - &mut self, logger: &WithChannelMonitor + &mut self, logger: &WithContext ) -> Vec where L::Target: Logger { log_debug!(logger, "Getting signed copy of latest holder commitment transaction!"); let commitment_tx = { @@ -5307,7 +5288,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn block_connected( &mut self, header: &Header, txdata: &TransactionData, height: u32, broadcaster: B, - fee_estimator: F, logger: &WithChannelMonitor, + fee_estimator: F, logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -5327,7 +5308,7 @@ impl ChannelMonitorImpl { height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, @@ -5360,7 +5341,7 @@ impl ChannelMonitorImpl { height: u32, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, @@ -5648,7 +5629,7 @@ impl ChannelMonitorImpl { mut claimable_outpoints: Vec, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) -> Vec where B::Target: BroadcasterInterface, @@ -5868,7 +5849,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn blocks_disconnected( - &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithChannelMonitor + &mut self, fork_point: BestBlock, broadcaster: B, fee_estimator: F, logger: &WithContext ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, @@ -5921,7 +5902,7 @@ impl ChannelMonitorImpl { txid: &Txid, broadcaster: B, fee_estimator: &LowerBoundedFeeEstimator, - logger: &WithChannelMonitor, + logger: &WithContext, ) where B::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -6032,7 +6013,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn should_broadcast_holder_commitment_txn( - &self, logger: &WithChannelMonitor + &self, logger: &WithContext ) -> Option where L::Target: Logger { // There's no need to broadcast our commitment transaction if we've seen one confirmed (even // with 1 confirmation) as it'll be rejected as duplicate/conflicting. @@ -6114,7 +6095,7 @@ impl ChannelMonitorImpl { /// or counterparty commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC #[rustfmt::skip] fn is_resolving_htlc_output( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, ) where L::Target: Logger { let funding_spent = get_confirmed_funding_scope!(self); @@ -6371,7 +6352,7 @@ impl ChannelMonitorImpl { /// own. 
#[rustfmt::skip] fn check_tx_and_push_spendable_outputs( - &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithChannelMonitor, + &mut self, tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &WithContext, ) where L::Target: Logger { let funding_spent = get_confirmed_funding_scope!(self); for spendable_output in self.get_spendable_outputs(funding_spent, tx) { diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index b9c4b1ca1ef..d97ae6097b6 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -25,8 +25,9 @@ use crate::blinded_path::payment::{ use crate::chain::transaction; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; +use crate::ln::msgs; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::types::ChannelId; -use crate::ln::{msgs, LocalHTLCFailureReason}; use crate::offers::invoice::Bolt12Invoice; use crate::offers::invoice_request::InvoiceRequest; use crate::offers::static_invoice::StaticInvoice; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e47b5492efd..289a241bc43 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -8876,7 +8876,6 @@ where update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), &self.context.channel_id); } else { - debug_assert!(htlcs_to_fail.is_empty()); let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" } else if self.context.channel_state.is_monitor_update_in_progress() { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 82a45dcb1a6..8a557f0b1b2 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2719,7 +2719,7 @@ pub struct ChannelManager< /// See `PendingOutboundPayment` documentation for more info. /// /// See `ChannelManager` struct-level documentation for lock order requirements. - pending_outbound_payments: OutboundPayments, + pending_outbound_payments: OutboundPayments, /// SCID/SCID Alias -> forward infos. Key of 0 means payments received. /// @@ -3977,7 +3977,7 @@ where best_block: RwLock::new(params.best_block), outbound_scid_aliases: Mutex::new(new_hash_set()), - pending_outbound_payments: OutboundPayments::new(new_hash_map(), logger.clone()), + pending_outbound_payments: OutboundPayments::new(new_hash_map()), forward_htlcs: Mutex::new(new_hash_map()), decode_update_add_htlcs: Mutex::new(new_hash_map()), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }), @@ -4707,6 +4707,12 @@ where /// the channel. This will spend the channel's funding transaction output, effectively replacing /// it with a new one. /// + /// # Required Feature Flags + /// + /// Initiating a splice requires that the channel counterparty supports splicing. Any + /// channel (no matter the type) can be spliced, as long as the counterparty is currently + /// connected. + /// /// # Arguments /// /// Provide a `contribution` to determine if value is spliced in or out. 
The splice initiator is @@ -4767,8 +4773,17 @@ where Err(e) => return Err(e), }; - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + let mut peer_state = peer_state_mutex.lock().unwrap(); + if !peer_state.latest_features.supports_splicing() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not support splicing".to_owned(), + }); + } + if !peer_state.latest_features.supports_quiescence() { + return Err(APIError::ChannelUnavailable { + err: "Peer does not support quiescence, a splicing prerequisite".to_owned(), + }); + } // Look for the channel match peer_state.channel_by_id.entry(*channel_id) { @@ -5384,11 +5399,12 @@ where }); if route.route_params.is_none() { route.route_params = Some(route_params.clone()); } let router = FixedRouter::new(route); + let logger = WithContext::from(&self.logger, None, None, Some(payment_hash)); self.pending_outbound_payments .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0), route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, - &self.pending_events, |args| self.send_payment_along_path(args)) + &self.pending_events, |args| self.send_payment_along_path(args), &logger) } /// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed @@ -5448,6 +5464,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, Some(payment_hash)), ) } @@ -5470,6 +5487,7 @@ where &self.node_signer, best_block_height, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, Some(payment_hash)), ) } @@ -5487,6 +5505,7 @@ where None, &self.entropy_source, best_block_height, + &WithContext::from(&self.logger, None, None, Some(payment_hash)), ) } @@ -5507,7 +5526,11 @@ where pub(crate) fn test_set_payment_metadata( &self, payment_id: PaymentId, new_payment_metadata: Option>, ) { - self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata); + self.pending_outbound_payments.test_set_payment_metadata( + payment_id, + new_payment_metadata, + &WithContext::from(&self.logger, None, None, None), + ); } /// Pays a [`Bolt11Invoice`] associated with the `payment_id`. See [`Self::send_payment`] for more info. 
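The repeated `WithContext::from(&self.logger, None, None, Some(payment_hash))` wrappers threaded into the outbound-payment calls here (and the replacement of `WithChannelMonitor` with `WithContext` in `channelmonitor.rs` above) rely on the same idea: a thin logger wrapper that stamps context fields onto every `Record` before delegating, which is exactly what the removed `WithChannelMonitor::log` did. A simplified sketch of that pattern — the field types below are stand-ins, not LDK's actual `PublicKey`/`ChannelId`/`PaymentHash` types:

```rust
use std::ops::Deref;

// Stand-in record/logger types; LDK's real `Record` carries more fields.
struct Record {
    peer_id: Option<[u8; 33]>,
    channel_id: Option<[u8; 32]>,
    payment_hash: Option<[u8; 32]>,
    msg: String,
}

trait Logger {
    fn log(&self, record: Record);
}

// A context-attaching wrapper in the spirit of `WithContext` / the removed
// `WithChannelMonitor`: it fills in whichever context fields it was built with.
struct WithContext<'a, L: Deref>
where
    L::Target: Logger,
{
    logger: &'a L,
    peer_id: Option<[u8; 33]>,
    channel_id: Option<[u8; 32]>,
    payment_hash: Option<[u8; 32]>,
}

impl<'a, L: Deref> WithContext<'a, L>
where
    L::Target: Logger,
{
    fn from(
        logger: &'a L, peer_id: Option<[u8; 33]>, channel_id: Option<[u8; 32]>,
        payment_hash: Option<[u8; 32]>,
    ) -> Self {
        WithContext { logger, peer_id, channel_id, payment_hash }
    }
}

impl<'a, L: Deref> Logger for WithContext<'a, L>
where
    L::Target: Logger,
{
    fn log(&self, mut record: Record) {
        // Stamp the context onto the record, then delegate to the wrapped logger.
        record.peer_id = self.peer_id;
        record.channel_id = self.channel_id;
        record.payment_hash = self.payment_hash;
        self.logger.log(record);
    }
}
```

Call sites that have no peer or channel in hand, like the outbound-payment paths above, pass `None` for those fields and only set `payment_hash`, which is what the 0.2.1 changelog entry about outbound-payment logging refers to.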
@@ -5532,6 +5555,7 @@ where ) -> Result<(), Bolt11PaymentError> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); self.pending_outbound_payments.pay_for_bolt11_invoice( invoice, payment_id, @@ -5546,6 +5570,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, Some(payment_hash)), ) } @@ -5615,6 +5640,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ) } @@ -5683,6 +5709,7 @@ where self.duration_since_epoch(), &*self.entropy_source, &self.pending_events, + &WithContext::from(&self.logger, None, None, None), ); match outbound_pmts_res { Ok(()) => {}, @@ -5797,6 +5824,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ) } @@ -5849,7 +5877,13 @@ where fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - self.pending_outbound_payments.abandon_payment(payment_id, reason, &self.pending_events); + + self.pending_outbound_payments.abandon_payment( + payment_id, + reason, + &self.pending_events, + &WithContext::from(&self.logger, None, None, None), + ); } /// Send a spontaneous payment, which is a payment that does not require the recipient to have @@ -5875,6 +5909,7 @@ where ) -> Result { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let payment_hash = payment_preimage.map(|preimage| preimage.into()); self.pending_outbound_payments.send_spontaneous_payment( payment_preimage, recipient_onion, @@ -5889,6 +5924,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, payment_hash), ) } @@ -5905,6 +5941,7 @@ where &self.node_signer, best_block_height, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ) } @@ -7078,7 +7115,8 @@ where if !self.decode_update_add_htlcs.lock().unwrap().is_empty() { return true; } - if self.pending_outbound_payments.needs_abandon_or_retry() { + let logger = WithContext::from(&self.logger, None, None, None); + if self.pending_outbound_payments.needs_abandon_or_retry(&logger) { return true; } false @@ -7148,6 +7186,7 @@ where best_block_height, &self.pending_events, |args| self.send_payment_along_path(args), + &WithContext::from(&self.logger, None, None, None), ); if needs_persist { should_persist = NotifyOption::DoPersist; @@ -8350,8 +8389,11 @@ where self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64, ); - self.pending_outbound_payments - .remove_stale_payments(duration_since_epoch, &self.pending_events); + self.pending_outbound_payments.remove_stale_payments( + duration_since_epoch, + &self.pending_events, + &WithContext::from(&self.logger, None, None, None), + ); self.check_refresh_async_receive_offer_cache(true); @@ -8523,6 +8565,7 @@ where // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. 
} => { + let logger = WithContext::from(&self.logger, None, None, Some(*payment_hash)); self.pending_outbound_payments.fail_htlc( source, payment_hash, @@ -8534,6 +8577,7 @@ where &self.secp_ctx, &self.pending_events, &mut from_monitor_update_completion, + &logger, ); if let Some(update) = from_monitor_update_completion { // If `fail_htlc` didn't `take` the post-event action, we should go ahead and @@ -9169,7 +9213,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }); - self.pending_outbound_payments.finalize_claims(hold_times, &self.pending_events); + let logger = WithContext::from(&self.logger, None, None, None); + self.pending_outbound_payments.finalize_claims(hold_times, &self.pending_events, &logger); } fn claim_funds_internal( @@ -9214,6 +9259,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ from_onchain, &mut ev_completion_action, &self.pending_events, + &WithContext::from(&self.logger, None, None, Some(payment_preimage.into())), ); // If an event was generated, `claim_htlc` set `ev_completion_action` to None, if // not, we should go ahead and run it now (as the claim was duplicative), at least @@ -12738,9 +12784,10 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self); let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry); + let logger = WithContext::from(&$self.logger, None, None, None); $self.pending_outbound_payments .add_new_awaiting_invoice( - payment_id, expiration, retry_strategy, route_params_config, None, + payment_id, expiration, retry_strategy, route_params_config, None, &logger, ) .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?; @@ -12782,9 +12829,10 @@ macro_rules! 
create_refund_builder { ($self: ident, $builder: ty) => { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self); let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry); + let logger = WithContext::from(&$self.logger, None, None, None); $self.pending_outbound_payments .add_new_awaiting_invoice( - payment_id, expiration, retry_strategy, route_params_config, None, + payment_id, expiration, retry_strategy, route_params_config, None, &logger, ) .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?; @@ -12919,6 +12967,7 @@ where optional_params.retry_strategy, optional_params.route_params_config, Some(retryable_invoice_request), + &WithContext::from(&self.logger, None, None, None), ) .map_err(|_| Bolt12SemanticError::DuplicatePaymentId) }; @@ -12948,6 +12997,7 @@ where optional_params.retry_strategy, optional_params.route_params_config, Some(retryable_invoice_request), + &WithContext::from(&self.logger, None, None, None), ) .map_err(|_| Bolt12SemanticError::DuplicatePaymentId) }; @@ -12990,6 +13040,7 @@ where optional_params.retry_strategy, optional_params.route_params_config, Some(retryable_invoice_request), + &WithContext::from(&self.logger, None, None, None), ) .map_err(|_| Bolt12SemanticError::DuplicatePaymentId) }; @@ -13158,6 +13209,7 @@ where optional_params.route_params_config, amount_msats, optional_params.payer_note, + &WithContext::from(&self.logger, None, None, None), )?; self.flow @@ -13481,12 +13533,14 @@ where #[cfg(test)] pub fn has_pending_payments(&self) -> bool { - self.pending_outbound_payments.has_pending_payments() + let logger = WithContext::from(&self.logger, None, None, None); + self.pending_outbound_payments.has_pending_payments(&logger) } #[cfg(test)] pub fn clear_pending_payments(&self) { - self.pending_outbound_payments.clear_pending_payments() + let logger = WithContext::from(&self.logger, None, None, None); + self.pending_outbound_payments.clear_pending_payments(&logger) } #[cfg(any(test, feature = "_test_utils"))] @@ -15155,8 +15209,9 @@ where } fn message_received(&self) { + let logger = WithContext::from(&self.logger, None, None, None); for (payment_id, retryable_invoice_request) in - self.pending_outbound_payments.release_invoice_requests_awaiting_invoice() + self.pending_outbound_payments.release_invoice_requests_awaiting_invoice(&logger) { let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request; @@ -15296,7 +15351,8 @@ where // Update the corresponding entry in `PendingOutboundPayment` for this invoice. // This ensures that event generation remains idempotent in case we receive // the same invoice multiple times. - self.pending_outbound_payments.mark_invoice_received(&invoice, payment_id).ok()?; + let logger = WithContext::from(&self.logger, None, None, None); + self.pending_outbound_payments.mark_invoice_received(&invoice, payment_id, &logger).ok()?; let event = Event::InvoiceReceived { payment_id, invoice, context, responder, @@ -15586,12 +15642,13 @@ where // offer, but tests can deal with that. 
offer = replacement_offer; } - if let Ok((amt_msats, payer_note)) = self.pending_outbound_payments.params_for_payment_awaiting_offer(payment_id) { + let logger = WithContext::from(&self.logger, None, None, None); + if let Ok((amt_msats, payer_note)) = self.pending_outbound_payments.params_for_payment_awaiting_offer(payment_id, &logger) { let offer_pay_res = self.pay_for_offer_intern(&offer, None, Some(amt_msats), payer_note, payment_id, Some(name), |retryable_invoice_request| { self.pending_outbound_payments - .received_offer(payment_id, Some(retryable_invoice_request)) + .received_offer(payment_id, Some(retryable_invoice_request), &logger) .map_err(|_| Bolt12SemanticError::DuplicatePaymentId) }); if offer_pay_res.is_err() { @@ -15601,7 +15658,7 @@ where // Note that the PaymentFailureReason should be ignored for an // AwaitingInvoice payment. self.pending_outbound_payments.abandon_payment( - payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events, + payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events, &logger, ); } } @@ -17151,8 +17208,7 @@ where } pending_outbound_payments = Some(outbounds); } - let pending_outbounds = - OutboundPayments::new(pending_outbound_payments.unwrap(), args.logger.clone()); + let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap()); for (peer_pubkey, peer_storage) in peer_storage_dir { if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) { @@ -17458,6 +17514,7 @@ where session_priv_bytes, &path, best_block_height, + &logger, ); } } @@ -17557,6 +17614,7 @@ where true, &mut compl_action, &pending_events, + &logger, ); // If the completion action was not consumed, then there was no // payment to claim, and we need to tell the `ChannelMonitor` @@ -17601,10 +17659,11 @@ where } } for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(payment_hash)); log_info!( - args.logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash + logger, + "Failing HTLC with payment hash {payment_hash} as it was resolved on-chain." ); let completion_action = Some(PaymentCompleteUpdate { counterparty_node_id: monitor.get_counterparty_node_id(), @@ -17671,6 +17730,11 @@ where // inbound edge of the payment's monitor has already claimed // the HTLC) we skip trying to replay the claim. let htlc_payment_hash: PaymentHash = payment_preimage.into(); + let logger = WithChannelMonitor::from( + &args.logger, + monitor, + Some(htlc_payment_hash), + ); let balance_could_incl_htlc = |bal| match bal { &Balance::ClaimableOnChannelClose { .. } => { // The channel is still open, assume we can still @@ -17693,7 +17757,7 @@ where // edge monitor but the channel is closed (and thus we'll // immediately panic if we call claim_funds_from_hop). if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { - log_error!(args.logger, + log_error!(logger, "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\ All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", htlc_payment_hash, @@ -17708,7 +17772,7 @@ where // of panicking at runtime. The user ideally should have read // the release notes and we wouldn't be here, but we go ahead // and let things run in the hope that it'll all just work out. 
- log_error!(args.logger, + log_error!(logger, "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\ As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\ All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\ diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 7af632e0351..bd4403fd3fe 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -971,7 +971,7 @@ pub fn get_revoke_commit_msgs>( assert_eq!(node_id, recipient); (*msg).clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, match events[1] { MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, ref updates } => { @@ -984,7 +984,7 @@ pub fn get_revoke_commit_msgs>( assert!(updates.commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); updates.commitment_signed.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, ) } @@ -3486,7 +3486,7 @@ pub fn send_along_route_with_secret<'a, 'b, 'c>( payment_id } -fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { +pub fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) { let origin_node_id = expected_path[0].node.get_our_node_id(); // iterate from the receiving node to the origin node and handle update fail htlc. diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index c161a9664c0..36fb17ff076 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9848,3 +9848,154 @@ pub fn test_multi_post_event_actions() { do_test_multi_post_event_actions(true); do_test_multi_post_event_actions(false); } + +#[xtest(feature = "_externalize_tests")] +pub fn test_dust_exposure_holding_cell_assertion() { + // Test that we properly move forward if we pop an HTLC-add from the holding cell but fail to + // add it to the channel. In 0.2 this cause a (harmless in prod) debug assertion failure. We + // try to ensure that this won't happen by checking that an HTLC will be able to be added + // before we add it to the holding cell, so getting into this state takes a bit of work. + // + // Here we accomplish this by using the dust exposure limit. This has the unique feature that + // node C can increase node B's dust exposure on the B <-> C channel without B doing anything. + // To exploit this, we get node B one HTLC away from being over-exposed to dust, give it one + // more HTLC in the holding cell, then have node C add an HTLC. By the time the holding-cell + // HTLC is released we are at max-dust-exposure and will fail it. 
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + + // Configure nodes with specific dust limits + let mut config = test_default_channel_config(); + // Use a fixed dust exposure limit to make the test simpler + const DUST_HTLC_VALUE_MSAT: u64 = 500_000; + config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + // Create channels: A <-> B <-> C + create_announced_chan_between_nodes(&nodes, 0, 1); + let bc_chan_id = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 10_000_000); + + // Send multiple dust HTLCs from B to C to approach the dust limit (including transaction fees) + for _ in 0..4 { + route_payment(&nodes[1], &[&nodes[2]], DUST_HTLC_VALUE_MSAT); + } + + // At this point we shouldn't be over the dust limit, and should still be able to send HTLCs. + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Add a further HTLC from B to C, but don't deliver the send messages. + // After this we'll only have the ability to add one more HTLC, but by not delivering the send + // messages (leaving B waiting on C's RAA) the next HTLC will go into B's holding cell. + let (route_bc, payment_hash_bc, _payment_preimage_bc, payment_secret_bc) = + get_route_and_payment_hash!(nodes[1], nodes[2], DUST_HTLC_VALUE_MSAT); + let onion_bc = RecipientOnionFields::secret_only(payment_secret_bc); + let id = PaymentId(payment_hash_bc.0); + nodes[1].node.send_payment_with_route(route_bc, payment_hash_bc, onion_bc, id).unwrap(); + check_added_monitors(&nodes[1], 1); + let send_bc = SendEvent::from_node(&nodes[1]); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Forward an additional HTLC from A through B to C. This will go in B's holding cell for node + // C as it is waiting on a response to the above messages. 
+ let payment_params_ac = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + let (route_ac, payment_hash_cell, _, payment_secret_ac) = + get_route_and_payment_hash!(nodes[0], nodes[2], payment_params_ac, DUST_HTLC_VALUE_MSAT); + let onion_ac = RecipientOnionFields::secret_only(payment_secret_ac); + let id = PaymentId(payment_hash_cell.0); + nodes[0].node.send_payment_with_route(route_ac, payment_hash_cell, onion_ac, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let send_ab = SendEvent::from_node(&nodes[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_ab.msgs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &send_ab.commitment_msg, false, true); + + // At this point when we process pending forwards the HTLC will go into the holding cell and no + // further messages will be generated. Node B will also be at its maximum dust exposure and + // will refuse to send any dust HTLCs (when it includes the holding cell HTLC). + expect_and_process_pending_htlcs(&nodes[1], false); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bc_chan.next_outbound_htlc_minimum_msat > DUST_HTLC_VALUE_MSAT); + + // Send an additional HTLC from C to B. This will make B unable to forward the HTLC already in + // its holding cell as it would be over-exposed to dust. + let (route_cb, payment_hash_cb, payment_preimage_cb, payment_secret_cb) = + get_route_and_payment_hash!(nodes[2], nodes[1], DUST_HTLC_VALUE_MSAT); + let onion_cb = RecipientOnionFields::secret_only(payment_secret_cb); + let id = PaymentId(payment_hash_cb.0); + nodes[2].node.send_payment_with_route(route_cb, payment_hash_cb, onion_cb, id).unwrap(); + check_added_monitors(&nodes[2], 1); + + // Now deliver all the messages and make sure that the HTLC is failed-back. 
+ let send_event_cb = SendEvent::from_node(&nodes[2]); + nodes[1].node.handle_update_add_htlc(node_c_id, &send_event_cb.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event_cb.commitment_msg); + check_added_monitors(&nodes[1], 1); + + nodes[2].node.handle_update_add_htlc(node_b_id, &send_bc.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_bc.commitment_msg); + check_added_monitors(&nodes[2], 1); + + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_c_id); + + // When we delivered the RAA above, we attempted (and failed) to add the HTLC to the channel, + // causing it to be ready to fail-back, which we do here: + let next_hop = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: bc_chan_id }; + expect_htlc_forwarding_fails(&nodes[1], &[next_hop]); + check_added_monitors(&nodes[1], 1); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash_cell, false, conditions); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + let cs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); + + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); + check_added_monitors(&nodes[2], 1); + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_cs.commitment_signed); + check_added_monitors(&nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + expect_and_process_pending_htlcs(&nodes[1], false); + expect_payment_claimable!(nodes[1], payment_hash_cb, payment_secret_cb, DUST_HTLC_VALUE_MSAT); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + + // Now that everything has settled, make sure the channels still work with a simple claim. + claim_payment(&nodes[2], &[&nodes[1]], payment_preimage_cb); +} diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 9473142cfed..2e6b279965f 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -41,7 +41,7 @@ pub mod channel; #[cfg(not(fuzzing))] pub(crate) mod channel; -pub(crate) mod onion_utils; +pub mod onion_utils; mod outbound_payment; pub mod wire; @@ -53,14 +53,6 @@ pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. 
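With `onion_utils` becoming a public module here (and the `#[cfg(fuzzing)]` re-exports dropped just below), downstream code such as the updated fuzz target earlier in this diff can name these items directly. A minimal, hedged sketch of deserializing `AttributionData` the way that fuzz target now does; the paths follow this diff and the byte sizes come from the arrays the old fuzz code constructed:

```rust
use lightning::ln::msgs::DecodeError;
use lightning::ln::onion_utils::AttributionData;
use lightning::util::ser::Readable;

/// Reads an `AttributionData` from raw bytes, as the updated fuzz target does.
/// The serialized form is 80 bytes of hold times (MAX_HOPS * HOLD_TIME_LEN)
/// followed by 840 bytes of HMACs (HMAC_LEN * HMAC_COUNT).
fn decode_attribution(mut bytes: &[u8]) -> Result<AttributionData, DecodeError> {
    Readable::read(&mut bytes)
}
```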
-#[cfg(fuzzing)] -pub use onion_utils::decode_fulfill_attribution_data; -#[cfg(fuzzing)] -pub use onion_utils::process_onion_failure; - -#[cfg(fuzzing)] -pub use onion_utils::AttributionData; - #[cfg(test)] #[allow(unused_mut)] mod async_payments_tests; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 354273f7170..81fa2fc41fa 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -4357,7 +4357,7 @@ mod tests { InboundOnionForwardPayload, InboundOnionReceivePayload, OutboundTrampolinePayload, TrampolineOnionPacket, }; - use crate::ln::onion_utils::{AttributionData, HMAC_COUNT, HMAC_LEN, HOLD_TIME_LEN, MAX_HOPS}; + use crate::ln::onion_utils::AttributionData; use crate::ln::types::ChannelId; use crate::routing::gossip::{NodeAlias, NodeId}; use crate::types::features::{ @@ -5890,13 +5890,10 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, reason: [1; 32].to_vec(), - attribution_data: Some(AttributionData { - hold_times: [3; MAX_HOPS * HOLD_TIME_LEN], - hmacs: [3; HMAC_LEN * HMAC_COUNT], - }), + attribution_data: Some(AttributionData::new()), }; let encoded_value = update_fail_htlc.encode(); - let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd03980303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303").unwrap(); + let target_value = 
>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0020010101010101010101010101010101010101010101010101010101010101010101fd03980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); assert_eq!(encoded_value, target_value); } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 6bba2b59e10..735ac4012ab 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -7,6 +7,8 @@ // You may not use this file except in accordance with one or both of these // licenses. +//! Low-level onion manipulation logic and fields + use super::msgs::OnionErrorPacket; use crate::blinded_path::BlindedHop; use crate::crypto::chacha20::ChaCha20; @@ -979,49 +981,102 @@ mod fuzzy_onion_utils { #[cfg(test)] pub(crate) attribution_failed_channel: Option, } + + pub fn process_onion_failure( + secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, + encrypted_packet: OnionErrorPacket, + ) -> DecodedOnionFailure + where + L::Target: Logger, + { + let (path, primary_session_priv) = match htlc_source { + HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), + _ => unreachable!(), + }; + + if path.has_trampoline_hops() { + // If we have Trampoline hops, the outer onion session_priv is a hash of the inner one. 
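			// A minimal illustrative sketch (not part of this patch): the derivation the next few
			// lines perform, written as a standalone helper. `derive_outer_session_priv` is a
			// hypothetical name; `Sha256`, `Hash` and `SecretKey` are the imports already in scope
			// in this module.
			fn derive_outer_session_priv(inner: &SecretKey) -> SecretKey {
				let hash = Sha256::hash(&inner.secret_bytes()).to_byte_array();
				// A SHA-256 output is, for all practical purposes, always a valid secp256k1 secret key.
				SecretKey::from_slice(&hash).expect("You broke SHA-256!")
			}
			// Only the inner `session_priv` stored in the `HTLCSource` is persisted; the outer
			// onion's key is recomputed from it on demand, as below.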
+ let session_priv_hash = + Sha256::hash(&primary_session_priv.secret_bytes()).to_byte_array(); + let outer_session_priv = + SecretKey::from_slice(&session_priv_hash[..]).expect("You broke SHA-256!"); + process_onion_failure_inner( + secp_ctx, + logger, + path, + &outer_session_priv, + Some(primary_session_priv), + encrypted_packet, + ) + } else { + process_onion_failure_inner( + secp_ctx, + logger, + path, + primary_session_priv, + None, + encrypted_packet, + ) + } + } + + /// Decodes the attribution data that we got back from upstream on a payment we sent. + pub fn decode_fulfill_attribution_data( + secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, + mut attribution_data: AttributionData, + ) -> Vec + where + L::Target: Logger, + { + let mut hold_times = Vec::new(); + + // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. + let shared_secrets = + construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) + .map(|(shared_secret, _, _, _, _)| shared_secret); + + // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 + // hops. Determine the number of hops to be used for attribution data. + let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); + + for (route_hop_idx, shared_secret) in + shared_secrets.enumerate().take(attributable_hop_count) + { + attribution_data.crypt(shared_secret.as_ref()); + + // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need + // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond + // the last attributable hop will not be available. + let position = attributable_hop_count - route_hop_idx - 1; + let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); + match res { + Ok(hold_time) => { + hold_times.push(hold_time); + + // Shift attribution data to prepare for processing the next hop. + attribution_data.shift_left(); + }, + Err(()) => { + // We will hit this if there is a node on the path that does not support fulfill attribution data. + log_debug!( + logger, + "Invalid fulfill HMAC in attribution data for node at pos {}", + route_hop_idx + ); + + break; + }, + } + } + + hold_times + } } #[cfg(fuzzing)] pub use self::fuzzy_onion_utils::*; #[cfg(not(fuzzing))] pub(crate) use self::fuzzy_onion_utils::*; -pub fn process_onion_failure( - secp_ctx: &Secp256k1, logger: &L, htlc_source: &HTLCSource, - encrypted_packet: OnionErrorPacket, -) -> DecodedOnionFailure -where - L::Target: Logger, -{ - let (path, primary_session_priv) = match htlc_source { - HTLCSource::OutboundRoute { ref path, ref session_priv, .. } => (path, session_priv), - _ => unreachable!(), - }; - - if path.has_trampoline_hops() { - // If we have Trampoline hops, the outer onion session_priv is a hash of the inner one. 
- let session_priv_hash = Sha256::hash(&primary_session_priv.secret_bytes()).to_byte_array(); - let outer_session_priv = - SecretKey::from_slice(&session_priv_hash[..]).expect("You broke SHA-256!"); - process_onion_failure_inner( - secp_ctx, - logger, - path, - &outer_session_priv, - Some(primary_session_priv), - encrypted_packet, - ) - } else { - process_onion_failure_inner( - secp_ctx, - logger, - path, - primary_session_priv, - None, - encrypted_packet, - ) - } -} - /// Process failure we got back from upstream on a payment we sent (implying htlc_source is an /// OutboundRoute). fn process_onion_failure_inner( @@ -1466,56 +1521,6 @@ where } } -/// Decodes the attribution data that we got back from upstream on a payment we sent. -pub fn decode_fulfill_attribution_data( - secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, - mut attribution_data: AttributionData, -) -> Vec -where - L::Target: Logger, -{ - let mut hold_times = Vec::new(); - - // Only consider hops in the regular path for attribution data. Blinded path attribution data isn't accessible. - let shared_secrets = - construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) - .map(|(shared_secret, _, _, _, _)| shared_secret); - - // Path length can reach 27 hops, but attribution data can only be conveyed back to the sender from the first 20 - // hops. Determine the number of hops to be used for attribution data. - let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); - - for (route_hop_idx, shared_secret) in shared_secrets.enumerate().take(attributable_hop_count) { - attribution_data.crypt(shared_secret.as_ref()); - - // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. We need - // to look at the chain of HMACs that does include all data up to the last attributable hop. Hold times beyond - // the last attributable hop will not be available. - let position = attributable_hop_count - route_hop_idx - 1; - let res = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); - match res { - Ok(hold_time) => { - hold_times.push(hold_time); - - // Shift attribution data to prepare for processing the next hop. - attribution_data.shift_left(); - }, - Err(()) => { - // We will hit this if there is a node on the path that does not support fulfill attribution data. - log_debug!( - logger, - "Invalid fulfill HMAC in attribution data for node at pos {}", - route_hop_idx - ); - - break; - }, - } - } - - hold_times -} - const BADONION: u16 = 0x8000; const PERM: u16 = 0x4000; const NODE: u16 = 0x2000; @@ -2520,6 +2525,7 @@ where } /// Build a payment onion, returning the first hop msat and cltv values as well. +/// /// `cur_block_height` should be set to the best known block height + 1. pub fn create_payment_onion( secp_ctx: &Secp256k1, path: &Path, session_priv: &SecretKey, total_msat: u64, @@ -2708,22 +2714,28 @@ fn decode_next_hop, N: NextPacketBytes>( } } -pub const HOLD_TIME_LEN: usize = 4; -pub const MAX_HOPS: usize = 20; -pub const HMAC_LEN: usize = 4; +pub(crate) const HOLD_TIME_LEN: usize = 4; +pub(crate) const MAX_HOPS: usize = 20; +pub(crate) const HMAC_LEN: usize = 4; // Define the number of HMACs in the attributable data block. For the first node, there are 20 HMACs, and then for every // subsequent node, the number of HMACs decreases by 1. 20 + 19 + 18 + ... + 1 = 20 * 21 / 2 = 210. 
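// A minimal sketch of the arithmetic this layout relies on, using only the constants defined
// here (`attribution_data_size_arithmetic` is a hypothetical test name, not part of this patch):
#[cfg(test)]
#[test]
fn attribution_data_size_arithmetic() {
	// The triangular sum 20 + 19 + ... + 1, written out, matches the closed form below.
	assert_eq!((1..=MAX_HOPS).sum::<usize>(), MAX_HOPS * (MAX_HOPS + 1) / 2);
	assert_eq!(MAX_HOPS * (MAX_HOPS + 1) / 2, 210);
	// Hold times take HOLD_TIME_LEN (4) bytes for each of up to MAX_HOPS (20) hops.
	assert_eq!(MAX_HOPS * HOLD_TIME_LEN, 80);
	// When decoding, the hop at route index `i` is verified at position
	// `attributable_hop_count - i - 1`, so the last attributable hop sits at position 0.
	let attributable_hop_count = usize::min(3, MAX_HOPS); // e.g. a three-hop route
	let positions: Vec<usize> =
		(0..attributable_hop_count).map(|i| attributable_hop_count - i - 1).collect();
	assert_eq!(positions, [2, 1, 0]);
}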
-pub const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; +pub(crate) const HMAC_COUNT: usize = MAX_HOPS * (MAX_HOPS + 1) / 2; #[derive(Clone, Debug, Hash, PartialEq, Eq)] +/// Attribution data allows the sender of an HTLC to identify which hop failed an HTLC robustly, +/// preventing earlier hops from corrupting the HTLC failure information (or at least allowing the +/// sender to identify the earliest hop which corrupted HTLC failure information). +/// +/// Additionally, it allows a sender to identify how long each hop along a path held an HTLC, with +/// 100ms granularity. pub struct AttributionData { - pub hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], - pub hmacs: [u8; HMAC_LEN * HMAC_COUNT], + hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN], + hmacs: [u8; HMAC_LEN * HMAC_COUNT], } impl AttributionData { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { hold_times: [0; MAX_HOPS * HOLD_TIME_LEN], hmacs: [0; HMAC_LEN * HMAC_COUNT] } } } @@ -2772,7 +2784,7 @@ impl AttributionData { /// Writes the HMACs corresponding to the given position that have been added already by downstream hops. Position is /// relative to the final node. The final node is at position 0. - pub fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { + pub(crate) fn write_downstream_hmacs(&self, position: usize, w: &mut HmacEngine) { // Set the index to the first downstream HMAC that we need to include. Note that we skip the first MAX_HOPS HMACs // because this is space reserved for the HMACs that we are producing for the current node. let mut hmac_idx = MAX_HOPS + MAX_HOPS - position - 1; diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 75fe55bfeac..f75c2dbaa87 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -837,22 +837,15 @@ pub(super) struct SendAlongPathArgs<'a> { pub hold_htlc_at_next_hop: bool, } -pub(super) struct OutboundPayments -where - L::Target: Logger, -{ +pub(super) struct OutboundPayments { pub(super) pending_outbound_payments: Mutex>, awaiting_invoice: AtomicBool, retry_lock: Mutex<()>, - logger: L, } -impl OutboundPayments -where - L::Target: Logger, -{ +impl OutboundPayments { pub(super) fn new( - pending_outbound_payments: HashMap, logger: L, + pending_outbound_payments: HashMap, ) -> Self { let has_invoice_requests = pending_outbound_payments.values().any(|payment| { matches!( @@ -867,10 +860,12 @@ where pending_outbound_payments: Mutex::new(pending_outbound_payments), awaiting_invoice: AtomicBool::new(has_invoice_requests), retry_lock: Mutex::new(()), - logger, } } +} +#[lightning_macros::add_logging(crate::util::logger::WithContext)] +impl OutboundPayments { #[rustfmt::skip] pub(super) fn send_payment( &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, @@ -1102,7 +1097,7 @@ where best_block_height, &send_payment_along_path ); log_info!( - self.logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, + logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, result ); if let Err(e) = result { @@ -1428,7 +1423,7 @@ where { #[cfg(feature = "std")] { if has_expired(&route_params) { - log_error!(self.logger, "Payment with id {} and hash {} had expired before we started paying", + log_error!(logger, "Payment with id {} and hash {} had expired before we started paying", payment_id, payment_hash); return Err(RetryableSendFailure::PaymentExpired) } @@ -1438,7 +1433,7 @@ where route_params, 
recipient_onion, keysend_preimage, invoice_request, best_block_height ) .map_err(|()| { - log_error!(self.logger, "Can't construct an onion packet without exceeding 1300-byte onion \ + log_error!(logger, "Can't construct an onion packet without exceeding 1300-byte onion \ hop_data length for payment with id {} and hash {}", payment_id, payment_hash); RetryableSendFailure::OnionPacketSizeExceeded })?; @@ -1448,7 +1443,7 @@ where Some(&first_hops.iter().collect::>()), inflight_htlcs(), payment_hash, payment_id, ).map_err(|_| { - log_error!(self.logger, "Failed to find route for payment with id {} and hash {}", + log_error!(logger, "Failed to find route for payment with id {} and hash {}", payment_id, payment_hash); RetryableSendFailure::RouteNotFound })?; @@ -1493,7 +1488,7 @@ where recipient_onion.clone(), payment_id, keysend_preimage, &route, Some(retry_strategy), Some(route_params.payment_params.clone()), entropy_source, best_block_height, None) .map_err(|_| { - log_error!(self.logger, "Payment with id {} is already pending. New payment had payment hash {}", + log_error!(logger, "Payment with id {} is already pending. New payment had payment hash {}", payment_id, payment_hash); RetryableSendFailure::DuplicatePayment })?; @@ -1501,7 +1496,7 @@ where let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, None, None, payment_id, None, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); - log_info!(self.logger, "Sending payment with id {} and hash {} returned {:?}", + log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); if let Err(e) = res { self.handle_pay_route_err( @@ -1530,7 +1525,7 @@ where { #[cfg(feature = "std")] { if has_expired(&route_params) { - log_error!(self.logger, "Payment params expired on retry, abandoning payment {}", &payment_id); + log_error!(logger, "Payment params expired on retry, abandoning payment {}", &payment_id); self.abandon_payment(payment_id, PaymentFailureReason::PaymentExpired, pending_events); return } @@ -1543,7 +1538,7 @@ where ) { Ok(route) => route, Err(e) => { - log_error!(self.logger, "Failed to find a route on retry, abandoning payment {}: {:#?}", &payment_id, e); + log_error!(logger, "Failed to find a route on retry, abandoning payment {}: {:#?}", &payment_id, e); self.abandon_payment(payment_id, PaymentFailureReason::RouteNotFound, pending_events); return } @@ -1557,7 +1552,7 @@ where for path in route.paths.iter() { if path.hops.len() == 0 { - log_error!(self.logger, "Unusable path in route (path.hops.len() must be at least 1"); + log_error!(logger, "Unusable path in route (path.hops.len() must be at least 1"); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); return } @@ -1590,13 +1585,13 @@ where const RETRY_OVERFLOW_PERCENTAGE: u64 = 10; let retry_amt_msat = route.get_total_amount(); if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { - log_error!(self.logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat); + log_error!(logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat); abandon_with_entry!(payment, PaymentFailureReason::UnexpectedError); return } if !payment.get().is_retryable_now() { - log_error!(self.logger, 
"Retries exhausted for payment id {}", &payment_id); + log_error!(logger, "Retries exhausted for payment id {}", &payment_id); abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted); return } @@ -1625,38 +1620,38 @@ where (total_msat, recipient_onion, keysend_preimage, onion_session_privs, invoice_request, bolt12_invoice.cloned()) }, PendingOutboundPayment::Legacy { .. } => { - log_error!(self.logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102"); + log_error!(logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102"); return }, PendingOutboundPayment::AwaitingInvoice { .. } | PendingOutboundPayment::AwaitingOffer { .. } => { - log_error!(self.logger, "Payment not yet sent"); + log_error!(logger, "Payment not yet sent"); debug_assert!(false); return }, PendingOutboundPayment::InvoiceReceived { .. } => { - log_error!(self.logger, "Payment already initiating"); + log_error!(logger, "Payment already initiating"); debug_assert!(false); return }, PendingOutboundPayment::StaticInvoiceReceived { .. } => { - log_error!(self.logger, "Payment already initiating"); + log_error!(logger, "Payment already initiating"); debug_assert!(false); return }, PendingOutboundPayment::Fulfilled { .. } => { - log_error!(self.logger, "Payment already completed"); + log_error!(logger, "Payment already completed"); return }, PendingOutboundPayment::Abandoned { .. } => { - log_error!(self.logger, "Payment already abandoned (with some HTLCs still pending)"); + log_error!(logger, "Payment already abandoned (with some HTLCs still pending)"); return }, } }, hash_map::Entry::Vacant(_) => { - log_error!(self.logger, "Payment with ID {} not found", &payment_id); + log_error!(logger, "Payment with ID {} not found", &payment_id); return } } @@ -1664,7 +1659,7 @@ where let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); - log_info!(self.logger, "Result retrying payment id {}: {:?}", &payment_id, res); + log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { self.handle_pay_route_err( e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, @@ -1693,7 +1688,7 @@ where match err { PaymentSendFailure::AllFailedResendSafe(errs) => { self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), &self.logger, pending_events); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), pending_events); self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, pending_events, send_payment_along_path); }, PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. 
} => { @@ -1710,7 +1705,7 @@ where } }); self.remove_session_privs(payment_id, failed_paths); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), &self.logger, pending_events); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), pending_events); // Some paths were sent, even if we failed to send the full MPP value our recipient may // misbehave and claim the funds, at which point we have to consider the payment sent, so // return `Ok()` here, ignoring any retry errors. @@ -1722,13 +1717,13 @@ where // initial HTLC-Add messages yet. }, PaymentSendFailure::PathParameterError(results) => { - log_error!(self.logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); + log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); - Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), &self.logger, pending_events); + Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), pending_events); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { - log_error!(self.logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); + log_error!(logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, @@ -1740,7 +1735,7 @@ where I: ExactSizeIterator + Iterator>, >( payment_id: PaymentId, payment_hash: PaymentHash, route_params: &mut RouteParameters, - paths: Vec, path_results: I, logger: &L, + paths: Vec, path_results: I, pending_events: &Mutex)>>, ) { let mut events = pending_events.lock().unwrap(); @@ -2228,7 +2223,7 @@ where if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { if !payment.get().is_fulfilled() { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array()); - log_info!(self.logger, "Payment with id {} and hash {} sent!", payment_id, payment_hash); + log_info!(logger, "Payment with id {} and hash {} sent!", payment_id, payment_hash); let fee_paid_msat = payment.get().get_pending_fee_msat(); let amount_msat = payment.get().total_msat(); pending_events.push_back((events::Event::PaymentSent { @@ -2258,7 +2253,7 @@ where } } } else { - log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", &payment_preimage); + log_trace!(logger, "Received duplicative fulfill for HTLC with payment_preimage {}", &payment_preimage); } } @@ -2395,7 +2390,7 @@ where failed_within_blinded_path, hold_times, .. - } = onion_error.decode_onion_failure(secp_ctx, &self.logger, &source); + } = onion_error.decode_onion_failure(secp_ctx, &logger, &source); #[cfg(not(any(test, feature = "_test_utils")))] let DecodedOnionFailure { network_update, @@ -2404,7 +2399,7 @@ where failed_within_blinded_path, hold_times, .. 
- } = onion_error.decode_onion_failure(secp_ctx, &self.logger, &source); + } = onion_error.decode_onion_failure(secp_ctx, &logger, &source); let payment_is_probe = payment_is_probe(payment_hash, &payment_id, probing_cookie_secret); let mut session_priv_bytes = [0; 32]; @@ -2429,7 +2424,7 @@ where if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(*payment_id) { if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) { log_trace!( - self.logger, + logger, "Received duplicative fail for HTLC with payment_hash {}", &payment_hash ); @@ -2437,7 +2432,7 @@ where } if payment.get().is_fulfilled() { log_trace!( - self.logger, + logger, "Received failure of HTLC with payment_hash {} after payment completion", &payment_hash ); @@ -2485,18 +2480,13 @@ where is_retryable_now } else { log_trace!( - self.logger, - "Received duplicative fail for HTLC with payment_hash {}", - &payment_hash + logger, + "Received duplicative fail for HTLC with payment_hash {payment_hash}" ); return; }; core::mem::drop(outbounds); - log_trace!( - self.logger, - "Failing outbound payment HTLC with payment_hash {}", - &payment_hash - ); + log_trace!(logger, "Failing outbound payment HTLC with payment_hash {payment_hash}"); let path_failure = { if payment_is_probe { @@ -2670,12 +2660,12 @@ where entry.get_mut().insert(session_priv_bytes, &path) }, }; - log_info!(self.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", + log_info!(logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}", if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), payment_hash); }, hash_map::Entry::Vacant(entry) => { entry.insert(new_retryable!()); - log_info!(self.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", + log_info!(logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", path_amt, payment_hash, log_bytes!(session_priv_bytes)); }, } @@ -2834,6 +2824,7 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::errors::APIError; use crate::util::hash_tables::new_hash_map; + use crate::util::logger::WithContext; use crate::util::test_utils; use alloc::collections::VecDeque; @@ -2871,7 +2862,9 @@ mod tests { #[rustfmt::skip] fn do_fails_paying_after_expiration(on_retry: bool) { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2889,11 +2882,11 @@ mod tests { outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None }, Some(Retry::Attempts(1)), Some(expired_route_params.payment_params.clone()), - &&keys_manager, 0, None).unwrap(); + &&keys_manager, 0, None, &log).unwrap(); outbound_payments.find_route_and_send_payment( PaymentHash([0; 32]), PaymentId([0; 32]), expired_route_params, &&router, vec![], &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - &|_| 
Ok(())); + &|_| Ok(()), &log); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 1); if let Event::PaymentFailed { ref reason, .. } = events[0].0 { @@ -2903,7 +2896,7 @@ mod tests { let err = outbound_payments.send_payment( PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), expired_route_params, &&router, vec![], || InFlightHtlcs::new(), - &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(())).unwrap_err(); + &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(()), &log).unwrap_err(); if let RetryableSendFailure::PaymentExpired = err { } else { panic!("Unexpected error"); } } } @@ -2916,7 +2909,9 @@ mod tests { #[rustfmt::skip] fn do_find_route_error(on_retry: bool) { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2933,11 +2928,11 @@ mod tests { outbound_payments.add_new_pending_payment(PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None }, Some(Retry::Attempts(1)), Some(route_params.payment_params.clone()), - &&keys_manager, 0, None).unwrap(); + &&keys_manager, 0, None, &log).unwrap(); outbound_payments.find_route_and_send_payment( PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![], &|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - &|_| Ok(())); + &|_| Ok(()), &log); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 1); if let Event::PaymentFailed { .. 
} = events[0].0 { } else { panic!("Unexpected event"); } @@ -2945,7 +2940,7 @@ mod tests { let err = outbound_payments.send_payment( PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params, &&router, vec![], || InFlightHtlcs::new(), - &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(())).unwrap_err(); + &&keys_manager, &&keys_manager, 0, &pending_events, |_| Ok(()), &log).unwrap_err(); if let RetryableSendFailure::RouteNotFound = err { } else { panic!("Unexpected error"); } } @@ -2955,7 +2950,9 @@ mod tests { #[rustfmt::skip] fn initial_send_payment_path_failed_evs() { let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -2995,7 +2992,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::ChannelUnavailable { err: "test".to_owned() })).unwrap(); + |_| Err(APIError::ChannelUnavailable { err: "test".to_owned() }), &log).unwrap(); let mut events = pending_events.lock().unwrap(); assert_eq!(events.len(), 2); if let Event::PaymentPathFailed { @@ -3013,7 +3010,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::MonitorUpdateInProgress)).unwrap(); + |_| Err(APIError::MonitorUpdateInProgress), &log).unwrap(); assert_eq!(pending_events.lock().unwrap().len(), 0); // Ensure that any other error will result in a PaymentPathFailed event but no blamed scid. 
@@ -3021,7 +3018,7 @@ mod tests { PaymentHash([0; 32]), RecipientOnionFields::spontaneous_empty(), PaymentId([1; 32]), Retry::Attempts(0), route_params.clone(), &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &pending_events, - |_| Err(APIError::APIMisuseError { err: "test".to_owned() })).unwrap(); + |_| Err(APIError::APIMisuseError { err: "test".to_owned() }), &log).unwrap(); let events = pending_events.lock().unwrap(); assert_eq!(events.len(), 2); if let Event::PaymentPathFailed { @@ -3038,32 +3035,34 @@ mod tests { fn removes_stale_awaiting_invoice_using_absolute_timeout() { let pending_events = Mutex::new(VecDeque::new()); let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 100; let tick_interval = 10; let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(absolute_expiry)); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); for seconds_since_epoch in (0..absolute_expiry).step_by(tick_interval) { let duration_since_epoch = Duration::from_secs(seconds_since_epoch); - outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events); + outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events, &log); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); assert!(pending_events.lock().unwrap().is_empty()); } let duration_since_epoch = Duration::from_secs(absolute_expiry); - outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events); + outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events, &log); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert!(!pending_events.lock().unwrap().is_empty()); assert_eq!( pending_events.lock().unwrap().pop_front(), @@ -3077,14 +3076,14 @@ mod tests { assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_err() ); } @@ -3094,31 +3093,33 @@ mod tests { fn removes_stale_awaiting_invoice_using_timer_ticks() { let pending_events = Mutex::new(VecDeque::new()); let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let 
outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let timer_ticks = 3; let expiration = StaleExpiration::TimerTicks(timer_ticks); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); for i in 0..timer_ticks { let duration_since_epoch = Duration::from_secs(i * 60); - outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events); + outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events, &log); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); assert!(pending_events.lock().unwrap().is_empty()); } let duration_since_epoch = Duration::from_secs(timer_ticks * 60); - outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events); + outbound_payments.remove_stale_payments(duration_since_epoch, &pending_events, &log); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert!(!pending_events.lock().unwrap().is_empty()); assert_eq!( pending_events.lock().unwrap().pop_front(), @@ -3132,14 +3133,14 @@ mod tests { assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_err() ); } @@ -3149,22 +3150,24 @@ mod tests { fn removes_abandoned_awaiting_invoice() { let pending_events = Mutex::new(VecDeque::new()); let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(100)); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); outbound_payments.abandon_payment( - payment_id, PaymentFailureReason::UserAbandoned, &pending_events + payment_id, PaymentFailureReason::UserAbandoned, &pending_events, &log ); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert!(!pending_events.lock().unwrap().is_empty()); assert_eq!( pending_events.lock().unwrap().pop_front(), 
@@ -3180,6 +3183,8 @@ mod tests { #[rustfmt::skip] fn fails_sending_payment_for_expired_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3189,16 +3194,16 @@ mod tests { let nonce = Nonce([0; 16]); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let expiration = StaleExpiration::AbsoluteTimeout(Duration::from_secs(100)); assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, + payment_id, expiration, Retry::Attempts(0), RouteParametersConfig::default(), None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); let created_at = now() - DEFAULT_RELATIVE_EXPIRY; let invoice = OfferBuilder::new(recipient_pubkey()) @@ -3214,11 +3219,11 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::SendingFailed(RetryableSendFailure::PaymentExpired)), ); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); let payment_hash = Some(invoice.payment_hash()); let reason = Some(PaymentFailureReason::PaymentExpired); @@ -3235,6 +3240,8 @@ mod tests { #[rustfmt::skip] fn fails_finding_route_for_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3242,7 +3249,7 @@ mod tests { let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let expanded_key = ExpandedKey::new([42; 32]); let nonce = Nonce([0; 16]); let payment_id = PaymentId([0; 32]); @@ -3262,10 +3269,10 @@ mod tests { assert!( outbound_payments.add_new_awaiting_invoice( payment_id, expiration, Retry::Attempts(0), - route_params_config, None, + route_params_config, None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::from_bolt12_invoice(&invoice), @@ -3277,11 +3284,11 @@ mod tests { outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, 
&pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::SendingFailed(RetryableSendFailure::RouteNotFound)), ); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); let payment_hash = Some(invoice.payment_hash()); let reason = Some(PaymentFailureReason::RouteNotFound); @@ -3298,6 +3305,8 @@ mod tests { #[rustfmt::skip] fn sends_payment_for_bolt12_invoice() { let logger = test_utils::TestLogger::new(); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); @@ -3305,7 +3314,7 @@ mod tests { let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let outbound_payments = OutboundPayments::new(new_hash_map()); let expanded_key = ExpandedKey::new([42; 32]); let nonce = Nonce([0; 16]); let payment_id = PaymentId([0; 32]); @@ -3348,47 +3357,47 @@ mod tests { }) ); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::UnexpectedInvoice), ); - assert!(!outbound_payments.has_pending_payments()); + assert!(!outbound_payments.has_pending_payments(&log)); assert!(pending_events.lock().unwrap().is_empty()); let route_params_config = RouteParametersConfig::default().with_max_total_routing_fee_msat(1234); assert!( outbound_payments.add_new_awaiting_invoice( - payment_id, expiration, Retry::Attempts(0), route_params_config, None, + payment_id, expiration, Retry::Attempts(0), route_params_config, None, &log, ).is_ok() ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| Ok(()) + &secp_ctx, 0, &pending_events, |_| Ok(()), &log ), Ok(()), ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); assert!(pending_events.lock().unwrap().is_empty()); assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, - &secp_ctx, 0, &pending_events, |_| panic!() + &secp_ctx, 0, &pending_events, |_| panic!(), &log ), Err(Bolt12PaymentError::DuplicateInvoice), ); - assert!(outbound_payments.has_pending_payments()); + assert!(outbound_payments.has_pending_payments(&log)); assert!(pending_events.lock().unwrap().is_empty()); } @@ -3414,7 +3423,9 @@ mod tests { fn time_out_unreleased_async_payments() { let pending_events = Mutex::new(VecDeque::new()); let logger = test_utils::TestLogger::new(); - let outbound_payments = 
OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 60; @@ -3440,7 +3451,7 @@ mod tests { core::mem::drop(outbounds); // The payment will not be removed if it isn't expired yet. - outbound_payments.remove_stale_payments(Duration::from_secs(absolute_expiry), &pending_events); + outbound_payments.remove_stale_payments(Duration::from_secs(absolute_expiry), &pending_events, &log); let outbounds = outbound_payments.pending_outbound_payments.lock().unwrap(); assert_eq!(outbounds.len(), 1); let events = pending_events.lock().unwrap(); @@ -3448,7 +3459,7 @@ mod tests { core::mem::drop(outbounds); core::mem::drop(events); - outbound_payments.remove_stale_payments(Duration::from_secs(absolute_expiry + 1), &pending_events); + outbound_payments.remove_stale_payments(Duration::from_secs(absolute_expiry + 1), &pending_events, &log); let outbounds = outbound_payments.pending_outbound_payments.lock().unwrap(); assert_eq!(outbounds.len(), 0); let events = pending_events.lock().unwrap(); @@ -3465,7 +3476,9 @@ mod tests { fn abandon_unreleased_async_payment() { let pending_events = Mutex::new(VecDeque::new()); let logger = test_utils::TestLogger::new(); - let outbound_payments = OutboundPayments::new(new_hash_map(), &logger); + let logger_ref = &logger; + let log = WithContext::from(&logger_ref, None, None, Some(PaymentHash([0; 32]))); + let outbound_payments = OutboundPayments::new(new_hash_map()); let payment_id = PaymentId([0; 32]); let absolute_expiry = 60; @@ -3491,7 +3504,7 @@ mod tests { core::mem::drop(outbounds); outbound_payments.abandon_payment( - payment_id, PaymentFailureReason::UserAbandoned, &pending_events + payment_id, PaymentFailureReason::UserAbandoned, &pending_events, &log ); let outbounds = outbound_payments.pending_outbound_payments.lock().unwrap(); assert_eq!(outbounds.len(), 0); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index e387ac3bfdd..d8c71c0ea1a 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -17,7 +17,9 @@ use crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::{ClosureReason, Event, FundingInfo, HTLCHandlingFailureType}; use crate::ln::chan_utils; use crate::ln::channel::CHANNEL_ANNOUNCEMENT_PROPAGATION_DELAY; -use crate::ln::channelmanager::{PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT}; +use crate::ln::channelmanager::{ + provided_init_features, PaymentId, RecipientOnionFields, BREAKDOWN_TIMEOUT, +}; use crate::ln::functional_test_utils::*; use crate::ln::funding::{FundingTxInput, SpliceContribution}; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; @@ -30,6 +32,69 @@ use crate::util::test_channel_signer::SignerOp; use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; +#[test] +fn test_splicing_not_supported_api_error() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut features = provided_init_features(&test_default_channel_config()); + features.clear_splicing(); + *node_cfgs[0].override_init_features.borrow_mut() = Some(features); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + 
let node_id_0 = nodes[0].node.get_our_node_id(); + let node_id_1 = nodes[1].node.get_our_node_id(); + + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); + + let bs_contribution = SpliceContribution::SpliceIn { + value: Amount::ZERO, + inputs: Vec::new(), + change_script: None, + }; + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution.clone(), + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support splicing")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } + + nodes[0].node.peer_disconnected(node_id_1); + nodes[1].node.peer_disconnected(node_id_0); + + let mut features = nodes[0].node.init_features(); + features.set_splicing_optional(); + features.clear_quiescence(); + *nodes[0].override_init_features.borrow_mut() = Some(features); + + let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); + reconnect_args.send_channel_ready = (true, true); + reconnect_args.send_announcement_sigs = (true, true); + reconnect_nodes(reconnect_args); + + let res = nodes[1].node.splice_channel( + &channel_id, + &node_id_0, + bs_contribution, + 0, // funding_feerate_per_kw, + None, // locktime + ); + match res { + Err(APIError::ChannelUnavailable { err }) => { + assert!(err.contains("Peer does not support quiescence, a splicing prerequisite")) + }, + _ => panic!("Wrong error {:?}", res.err().unwrap()), + } +} + #[test] fn test_v1_splice_in_negative_insufficient_inputs() { let chanmon_cfgs = create_chanmon_cfgs(2);