From 76131619b08307e02590773042b20251c927e7c5 Mon Sep 17 00:00:00 2001
From: Philip Kannegaard Hayes
Date: Wed, 12 Mar 2025 17:26:13 -0700
Subject: [PATCH 1/5] lightning-invoice: explicitly enforce a 7089 B max length
 on deser

The new `bech32-v0.11.0` version (prev: `v0.9.1`) now enforces a max length of
1023 bytes. Previously there was no max.

BOLT11 invoices can definitely exceed 1023 B with a long-ish description and
2 route hints, so this limit is likely too low.

Having a limit is probably a good idea. What do other projects choose? Here's
a brief survey:

LDK (pre-0.1): (no limit)
LDK (post-0.1): 1023 B
LDK (post-PR): 7089 B
LND[1]: 7089 B
CLN[2]: (no limit)
ACINQ[3][4]: (no limit)

LND uses 7089 B, which was chosen to be "the max number of bytes that can fit
in a QR code". LND's rationale is technically incorrect, as QR codes actually
have a max capacity of 7089 _numeric_ characters and only support up to 4296
all-uppercase alphanumeric characters. However, ecosystem-wide consistency is
more important.

A more conservative limit that would probably also suffice is 2953 B, the QR
code length limit for a lowercase bech32-encoded invoice.

[1]: https://github.com/lightningnetwork/lnd/blob/6531d4505098eb14e6c24aedfd752fc15e85845d/zpay32/invoice.go#L87
[2]: https://github.com/ElementsProject/lightning/blob/0e7615b1b73eee161911763840d6260baf596755/common/bolt11.c#L683
[3]: https://github.com/ACINQ/lightning-kmp/blob/feda82c853660a792b911be518367a228ed6e0ee/modules/core/src/commonMain/kotlin/fr/acinq/lightning/payment/Bolt11Invoice.kt#L165
[4]: https://github.com/ACINQ/bitcoin-kmp/blob/master/src/commonMain/kotlin/fr/acinq/bitcoin/Bech32.kt#L122
---
 lightning-invoice/src/de.rs  | 245 ++++++++++++++++++++++++++++++++++-
 lightning-invoice/src/lib.rs |  28 +++-
 2 files changed, 270 insertions(+), 3 deletions(-)

diff --git a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs
index ee071d6349a..85a0924ce22 100644
--- a/lightning-invoice/src/de.rs
+++ b/lightning-invoice/src/de.rs
@@ -9,9 +9,10 @@ use core::str::FromStr;
 use std::error;
 
 use bech32::primitives::decode::{CheckedHrpstring, CheckedHrpstringError};
-use bech32::{Bech32, Fe32, Fe32IterExt};
+use bech32::{Fe32, Fe32IterExt};
 
 use crate::prelude::*;
+use crate::Bolt11Bech32;
 use bitcoin::hashes::sha256;
 use bitcoin::hashes::Hash;
 use bitcoin::{PubkeyHash, ScriptHash, WitnessVersion};
@@ -377,7 +378,7 @@ impl FromStr for SignedRawBolt11Invoice {
 	type Err = Bolt11ParseError;
 
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
-		let parsed = CheckedHrpstring::new::<Bech32>(s)?;
+		let parsed = CheckedHrpstring::new::<Bolt11Bech32>(s)?;
 		let hrp = parsed.hrp();
 		// Access original non-packed 32 byte values (as Fe32s)
 		// Note: the type argument is needed due to the API peculiarities, but it's not used
@@ -1175,4 +1176,244 @@ mod test {
 			)
 		)
 	}
+
+	// Test some long invoice test vectors successfully roundtrip. Generated
+	// from Lexe proptest: .
+ #[test] + fn test_deser_long_test_vectors() { + use crate::Bolt11Invoice; + + #[track_caller] + fn parse_ok(invoice_str: &str) { + let invoice = Bolt11Invoice::from_str(invoice_str).unwrap(); + let invoice_str2 = invoice.to_string(); + if invoice_str != invoice_str2 { + panic!( + "Invoice does not roundtrip: invoice_str != invoice_str2\n\ + invoice_str: {invoice_str}\n\ + invoice_str2: {invoice_str2}\n\ + \n\ + {invoice:?}" + ); + } + } + + // 1024 B shrunk invoice just above previous limit of 1023 B from Lexe proptest + parse_ok( + "lnbc10000000000000000010p1qqqqqqqdtuxpqkzq8sjzqgps4pvyczqq8sjzqgpuysszq0pyyqsrp2zs0sjz\ + qgps4pxrcfpqyqc2slpyyqsqsv9gwz59s5zqpqyps5rc9qsrs2pqxz5ysyzcfqgysyzs0sjzqgqq8sjzqgps4p\ + xqqzps4pqpssqgzpxps5ruysszqrps4pg8p2zgpsc2snpuysszqzqsgqvys0pyyqsrcfpqyqvycv9gfqqrcfpq\ + yq7zggpq8q5zqyruysszqwpgyqxpsjqsgq7zggpqps7zggpq8sjzqgqgqq7zggpqpq7zggpq8q5zqqpuysszq0\ + pyyqsqs0pyyqspsnqgzpqpqlpyyqsqszpuysszqyzvzpvysrqq8sjzqgqvrp7zggpqpqxpsspp5mf45hs3cgph\ + h0074r5qmr74y82r26ac4pzdg4nd9mdmsvz6ffqpssp5vr4yra4pcv74h9hk3d0233nqu4gktpuykjamrafrdp\ + uedqugzh3q9q2sqqqqqysgqcqrpqqxq8pqqqqqqnp4qgvcxpme2q5lng36j9gruwlrtk2f86s3c5xmk87yhvyu\ + wdeh025q5r9yqwnqegv9hj9nzkhyxaeyq92wcrnqp36pyrc2qzrvswj5g96ey2dn6qqqqqqqqqqqqqqqqqqqqq\ + qqqqqqqqp9a5vs0t4z56p64xyma8s84yvdx7uhqj0gvrr424fea2wpztq2fwqqqqqqqqqqqqqqqqqqqqqqqqqq\ + qqqqmy9qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\ + qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpcnsxc32du9n7amlypuhclzqrt6lkegq\ + 0v3r7nczjv9tv30z7phq80r3dm7pvgykl7gwuenmem93h5xwdwac6ngsmzqc34khrg3qjgsq6qk6lc" + ); + // 1517 B mainnet invoice from Lexe proptest + parse_ok( + "lnbc8735500635020489010p1av5kfs8deupvyk4u5ynj03hmalhhhml0fxc2jlrv9z4lg6s4hnhkz69malhhe\ + t3x9yqpsxru4a3kwar2qtu2q2ughx367q600s5x7c7tln4k0fu78skxqevaqm8sayhuur377zgf3uf94n57xzh\ + dw99u42hwc089djn5xj723w7zageflsnzdmyte89tecf2ac7xhg4y3u9f4xpuv2hwxjlsarp0e24fu8tme6rgv\ + 0tqj08z9f4u30rw59k8emhtvs7wye0xfw6x5q5tju2p208rvtkunzwtwghtp22tlnh62gxwhfkxp4cnz7ts3rx\ + vlzszhv9y00h77lpdvcjyhjtmalh5dn5e8n5w8cqle0vunzduu4nza9y0734qhxday9hzywl0aa0vhzy0qmphc\ + 64d4hduj08dv2krpgqtc2v83gptk34reelxyc7wsgnze890c6nrv6p0cmepatc269eayzjjkqk30n52rfl5dg7\ + wztl96f7wc2tzx34q909xuajnyt4u4lnk87lwal7z0etdz5tmece0v3u796jfp68nccn05ty54ncfelts3v8g0\ + sn6v6hsu87zat4r03368ersu87252dd0nswymxzc2pyxl8yy844hspuyj47w0px4u4leefq568sk0rr9th4ql9\ + f9ykawrczkz5hp22nstg3lrlsa6u2q2ull3kzce2sh0h77sjv0zszhzy4hfh6u0pwux5l3gpthsn72mfu47sw9\ + zw3hzk7srznp27z0etdp0725me00sn72mgkf0fteehruk0lg6swh34z52puaekzmjlmalhhe6m8ug7z3c8g8zh\ + jjspp5zj0sm85g5ufng9w7s6p4ucdk80tyvz64sg54v0cy4vgnr37f78sqsp5l6azu2hv6we30er90jrslqpvd\ + trnrphhesca2wg5q83k52rsu2cq9q2sqqqqqysgqcqr8h2np4qw0ha2k282hm8jh5rcfq0hsp2zhddtlc5vs23\ + uphyv0lv3k8sqsfgfp4qyrk86tx5xg2aa7et4cdzhnvl5s4nd33ugytt7gamk9tugn9yransr9yq08gpwsn8t2\ + tq4ducjfhrcz707av0ss20urjh8vldrpmehqxa0stkesvuq82txyqzfhej7qccswy7k5wvcppk63c6zpjytfda\ + ccadacjtn52lpe6s85rjfqlxzp6frq33xshaz2nr9xjkhd3jj8qg39nmfzvpgmayakqmy9rseakwgcudug7hs4\ + 5wh430ywh7qhj3khczh8gle4cn93ymgfwa7rrvcw9lywyyz58k4p40a3nu9svthaf0qeg8f2ay4tw9p48p70qm\ + ayu3ejl2q8pj9e2l22h7775tl44hs6ke4sdfgcr6aj8wra4r2v9sj6xa5chd5ctpfg8chtrer3kkp0e6af88lk\ + rfxcklf2hyslv2hr0xl5lwrm5y5uttxn4ndfz8789znf78nspa3xy68" + ); + // 1804 B regtest invoice from Lexe proptest + parse_ok( + "lnbcrt17124979001314909880p1y6lkcwgd76tfnxksfk2atyy4tzw4nyg6jrx3282s2ygvcxyj64gevhxsjk\ + 2ymhzv3e0p5h5u3kfey92jt9ge44gsfnwycxynm2g3unw3ntt9qh25texe98jcfhxvcxuezxw9tngwrndpy9s4\ + 
p4x9eyze2tfe9rxm68tp5yj5jfduen2nny8prhsm6edegn2stww4n4gwp4vfjkvdthd43524n9fa8h262vwesk\ + g66nw3vnyafn29zhsvfeg9mxummtfp35uumzfqmhy3jwgdh55mt5xpvhgmjn25uku5e5g939wmmnvdfygnrdgd\ + h56uzcx4a92vfhgdcky3z9gfnrsvp4f4f55j68vak9yufhvdm8x5zrgc6955jvf429zumv89nh2a35wae5yntg\ + v985jumpxehyv7t92pjrwufs89yh23f5ddy5s568wgchve3cg9ek5nzewgcrzjz0dftxg3nvf4hngje52ac4zm\ + esxpvk6sfef4hkuetvd4vk6n29wftrw5rvg4yy2vjjwyexc5mnvfd8xknndpqkkenx0q642j35298hwve3dyc5\ + 25jrd3295sm9v9jrqup3wpykg7zd239ns7jgtqu95jz0deaxksjh2fu56n6n2f5x6mm8wa89qjfef385sam2x9\ + mxcs20gfpnq460d3axzknnf3e4sw2kvf25wjjxddpyg52dw4vx7nn2w9cyu5t8vfnyxjtpg33kssjp24ch536p\ + d938snmtx345x6r4x93kvv2tff855um3tfekxjted4kxys2kve5hvu6g89z4ynmjgfhnw7tv892rymejgvey77\ + rcfqe9xjr92d85636fvajxyajndfa92k2nxycx5jtjx4zxsm2y2dyn2up50f5ku3nrfdk4g5npxehkzjjv8y69\ + gveev4z56denddaxy7tfwe8xx42zgf6kzmnxxpk826ze2s6xk6jrwearw6ejvd8rsvj2fpg525jtd5pp5j2tlt\ + 28m4kakjr84w6ce4fd8e7awy6ncyswcyut760rdnem30ptssp5p5u3xgxxtr6aev8y2w9m30wcw3kyn7fgm8wm\ + f8qw8wzrqt34zcvq9q2sqqqqqysgqcqypmw9xq8lllllllnp4qt36twam2ca08m3s7vnhre3c0j89589wyw4vd\ + k7fln0lryxzkdcrur28qwqq3hnyt84vsasuldd2786eysdf4dyuggwsmvw2atftf7spkmpa9dd3efq5tenpqm2\ + v7vcz2a4s0s7jnqpjn0srysnstnw5y5z9taxn0ue37aqgufxcdsj6f8a2m4pm9udppdzc4shsdqzzx0u0rm4xl\ + js0dqz3c5zqyvglda7nsqvqfztmlyup7vyuadzav4zyuqwx90ev6nmk53nkhkt0sev9e745wxqtdvrqzgqkaka\ + zen7e2qmsdauk665g3llg5qtl79t3xulrhjnducehdn72gpmkjvtth7kh6ejpl9dv0qcsxv2jvzzvg0hzdmk3y\ + jsmydqksdk3h78kc63qnr265h8vyeslqexszppfm7y287t3gxvhw0ulg2wp0rsw3tevz03z50kpy77zdz9snxm\ + kkwxd76xvj4qvj2f89rrnuvdvzw947ay0kydc077pkec2jet9qwp2tud98s24u65uz07eaxk5jk3e4nggn2caa\ + ek2p5pkrc6mm6mxjm2ezpdu8p5jstg6tgvnttgac3ygt5ys04t4udujzlshpl7e4f3ff03xe6v24cp6aq4wa" + ); + // 1870 B testnet invoice from Lexe proptest + parse_ok( + "lntb5826417333454665580p1c5rwh5edlhf33hvkj5vav5z3t02a5hxvj3vfv5kuny2f3yzj6zwf9hx3nn2fk\ + 9gepc2a3ywvj6dax5v3jy2d5nxmp3gaxhycjkv38hx4z4d4vyznrp2p24xa6t2pg4w4rrxfens6tcxdhxvvfhx\ + a8xvvpkgat8xnpe2p44juz9g43hyur00989gvfhwd2kj72wfum4g4mgx5m5cs2rg9d9vnn6xe89ydnnvfpyy52\ + s2dxx2er4x4xxwstdd5cxwdrjw3nkxnnv2uexxnrxw4t56sjswfn52s2xv4t8xmjtwpn8xm6sfeh4q526dyu8x\ + 3r9gceyw6fhd934qjttvdk57az5w368zdrhwfjxxu35xcmrsmmpd4g8wwtev4tkzutdd32k56mxveuy6c6v2em\ + yv7zkfp39zjpjgd8hx7n4xph5kceswf6xxmnyfcuxca20fp24z7ncvfhyu5jf2exhw36nwf68s7rh2a6yzjf4d\ + gukcenfxpchqsjn2pt5x334tf98wsm6dvcrvvfcwapxvk2cdvmk2npcfe68zue3w4f9xc6s2fvrw6nrg3fkskt\ + e2ftxyc20ffckcd692964sdzjwdp4yvrfdfm9q72pxp3kwat5f4j9xee5da8rss60w92857tgwych55f5w3n8z\ + mzexpy4jwredejrqm6txf3nxm64ffh8x460dp9yjazhw4yx6dm5xerysnn5wa455k3h2d89ss2fd9axwjp3f4r\ + 9qdmfd4fx6stx2eg9sezrv369w7nvvfvhj4nnwaz5z3ny8qcxcdnvwd64jc2nx9uy2e2gxdrnx6r3w9ykxatxx\ + g6kk6rv2ekr2emwx5ehy362d3x82dzvddfxs5rcg4vn27npf564qdtg2anycc6523jnwe3e0p65unrpvccrs5m\ + 2fuexgmnj23ay5e34v4xk5jnrwpg4xemfwqe5vjjjw9qk76zsd9yrzu6xdpv5v5ntdejxg6jtv3kx65t6gdhrg\ + vj3fe34sj2vv3h5kegpp57hjf5kv6clw97y2e063yuz0psrz9a6l49v836dflum00rh8qtn8qsp5gd29qycuze\ + 08xls8l32zjaaf2uqv78v97lg9ss0c699huw980h2q9q2sqqqqqysgqcqr8ulnp4q26hcfwr7qxz7lwwlr2kjc\ + rws7m2u5j36mm0kxa45uxy6zvsqt2zzfppjdkrm2rlgadt9dq3d6jkv4r2cugmf2kamr28qwuleyzzyyly8a6t\ + u70eldahx7hzxx5x9gms7vjjr577ps8n4qyds5nern39j0v7czkch2letnt46895jupxgehf208xgxz8d6j8gu\ + 3h2qqtsk9nr9nuquhkqjxw40h2ucpldrawmktxzxdgtkt9a3p95g98nywved8s8laj2a0c98rq5zzdnzddz6nd\ + w0lvr6u0av9m7859844cgz9vpeq05gw79zqae2s7jzeq66wydyueqtp56qc67g7krv6lj5aahxtmq4y208q5qy\ + z38cnwl9ma6m5f4nhzqaj0tjxpfrk4nr5arv9d20lvxvddvffhzygmyuvwd959uhdcgcgjejchqt2qncuwpqqk\ + 
5vws7dflw8x6esrfwhz7h3jwmhevf445k76nme926sr8drsdveqg7l7t7lnjvhaludqnwk4l2pmevkjf9pla92\ + 4p77v76r7x8jzyy7h59hmk0lgzfsk6c8dpj37hssj7jt4q7jzvy8hq25l3pag37axxanjqnq56c47gpgy6frsy\ + c0str9w2aahz4h6t7axaka4cwvhwg49r6qgj8kwz2mt6vcje25l9ekvmgq5spqtn" + ); + } + + // Generate a valid invoice of `MAX_LENGTH` bytes and ensure that it roundtrips. + #[test] + fn test_serde_long_invoice() { + use crate::TaggedField::*; + use crate::{ + Bolt11Invoice, Bolt11InvoiceFeatures, Bolt11InvoiceSignature, Currency, + PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, RawTaggedField, Sha256, + SignedRawBolt11Invoice, + }; + use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; + use bitcoin::secp256k1::PublicKey; + use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; + + // Generate an `UnknownSemantics` field with a given length. + fn unknown_semantics_field(len: usize) -> Vec { + assert!(len <= 1023); + let mut field = Vec::with_capacity(len + 3); + // Big-endian encoded length prefix + field.push(Fe32::Q); + field.push(Fe32::try_from((len >> 5) as u8).unwrap()); + field.push(Fe32::try_from((len & 0x1f) as u8).unwrap()); + // Data + field.extend(std::iter::repeat(Fe32::P).take(len)); + field + } + + // Invoice fields + let payment_hash = sha256::Hash::from_str( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(); + let description = std::iter::repeat("A").take(639).collect::(); + let fallback_addr = crate::Fallback::SegWitProgram { + version: bitcoin::WitnessVersion::V0, + program: vec![0; 32], + }; + let payee_pk = PublicKey::from_slice(&[ + 0x03, 0x24, 0x65, 0x3e, 0xac, 0x43, 0x44, 0x88, 0x00, 0x2c, 0xc0, 0x6b, 0xbf, 0xb7, + 0xf1, 0x0f, 0xe1, 0x89, 0x91, 0xe3, 0x5f, 0x9f, 0xe4, 0x30, 0x2d, 0xbe, 0xa6, 0xd2, + 0x35, 0x3d, 0xc0, 0xab, 0x1c, + ]) + .unwrap(); + let route_hints = std::iter::repeat(RouteHintHop { + src_node_id: payee_pk, + short_channel_id: 0x0102030405060708, + fees: RoutingFees { base_msat: 1, proportional_millionths: 20 }, + cltv_expiry_delta: 3, + htlc_minimum_msat: None, + htlc_maximum_msat: None, + }) + .take(12) + .collect::>(); + + // Build raw invoice + let raw_invoice = RawBolt11Invoice { + hrp: RawHrp { + currency: Currency::Bitcoin, + raw_amount: Some(10000000000000000010), + si_prefix: Some(crate::SiPrefix::Pico), + }, + data: RawDataPart { + timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), + tagged_fields: vec![ + PaymentHash(Sha256(payment_hash)).into(), + Description(crate::Description::new(description).unwrap()).into(), + PayeePubKey(crate::PayeePubKey(payee_pk)).into(), + ExpiryTime(crate::ExpiryTime(std::time::Duration::from_secs(u64::MAX))).into(), + MinFinalCltvExpiryDelta(crate::MinFinalCltvExpiryDelta(u64::MAX)).into(), + Fallback(fallback_addr).into(), + PrivateRoute(crate::PrivateRoute(RouteHint(route_hints))).into(), + PaymentSecret(crate::PaymentSecret([17; 32])).into(), + PaymentMetadata(vec![0x69; 639]).into(), + Features(Bolt11InvoiceFeatures::from_le_bytes(vec![0xaa; 639])).into(), + // This invoice is 4458 B w/o unknown semantics fields. + // Need to add some non-standard fields to reach 7089 B limit. 
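+				// Each `unknown_semantics_field(len)` below contributes `len + 3`
+				// characters (1 type char + 2 length chars + `len` data chars), so:
+				// 4458 + (1023 + 3) + (1023 + 3) + (576 + 3) = 7089 = `MAX_LENGTH`.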
+ RawTaggedField::UnknownSemantics(unknown_semantics_field(1023)), + RawTaggedField::UnknownSemantics(unknown_semantics_field(1023)), + RawTaggedField::UnknownSemantics(unknown_semantics_field(576)), + ], + }, + }; + + // Build signed invoice + let hash = [ + 0x75, 0x99, 0xe1, 0x51, 0x7f, 0xa1, 0x0e, 0xb5, 0xc0, 0x79, 0xb4, 0x6e, 0x8e, 0x62, + 0x0c, 0x4f, 0xb0, 0x72, 0x71, 0xd2, 0x81, 0xa1, 0x92, 0x65, 0x9c, 0x90, 0x89, 0x69, + 0xe1, 0xf3, 0xd6, 0x59, + ]; + let signature = &[ + 0x6c, 0xbe, 0xbe, 0xfe, 0xd3, 0xfb, 0x07, 0x68, 0xb5, 0x79, 0x98, 0x82, 0x29, 0xab, + 0x0e, 0xcc, 0x8d, 0x3a, 0x81, 0xee, 0xee, 0x07, 0xb3, 0x5d, 0x64, 0xca, 0xb4, 0x12, + 0x33, 0x99, 0x33, 0x2a, 0x31, 0xc2, 0x2c, 0x2b, 0x62, 0x96, 0x4e, 0x37, 0xd7, 0x96, + 0x50, 0x5e, 0xdb, 0xe9, 0xa9, 0x5b, 0x0b, 0x3b, 0x87, 0x22, 0x89, 0xed, 0x95, 0xf1, + 0xf1, 0xdf, 0x2d, 0xb6, 0xbd, 0xf5, 0x0a, 0x20, + ]; + let signature = Bolt11InvoiceSignature( + RecoverableSignature::from_compact(signature, RecoveryId::from_i32(1).unwrap()) + .unwrap(), + ); + let signed_invoice = SignedRawBolt11Invoice { raw_invoice, hash, signature }; + + // Ensure serialized invoice roundtrips + let invoice = Bolt11Invoice::from_signed(signed_invoice).unwrap(); + let invoice_str = invoice.to_string(); + assert_eq!(invoice_str.len(), crate::MAX_LENGTH); + assert_eq!(invoice, Bolt11Invoice::from_str(&invoice_str).unwrap()); + } + + // Test that invoices above the maximum length fail to parse with the expected error. + #[test] + fn test_deser_too_long_fails() { + use crate::{Bolt11Invoice, ParseOrSemanticError, MAX_LENGTH}; + use bech32::primitives::decode::{CheckedHrpstringError, ChecksumError}; + + fn parse_is_code_length_err(s: &str) -> bool { + // Need matches! b/c ChecksumError::CodeLength(_) is marked non-exhaustive + matches!( + Bolt11Invoice::from_str(s), + Err(ParseOrSemanticError::ParseError(Bolt11ParseError::Bech32Error( + CheckedHrpstringError::Checksum(ChecksumError::CodeLength(_)) + ))), + ) + } + + let mut too_long = String::from("lnbc1"); + too_long.push_str( + String::from_utf8(vec![b'x'; (MAX_LENGTH + 1) - too_long.len()]).unwrap().as_str(), + ); + assert!(parse_is_code_length_err(&too_long)); + assert!(!parse_is_code_length_err(&too_long[..too_long.len() - 1])); + } } diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 17cc41f9502..b814210b390 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -31,7 +31,7 @@ extern crate serde; use std::time::SystemTime; use bech32::primitives::decode::CheckedHrpstringError; -use bech32::Fe32; +use bech32::{Checksum, Fe32}; use bitcoin::hashes::{sha256, Hash}; use bitcoin::{Address, Network, PubkeyHash, ScriptHash, WitnessProgram, WitnessVersion}; use lightning_types::features::Bolt11InvoiceFeatures; @@ -147,6 +147,32 @@ pub const DEFAULT_EXPIRY_TIME: u64 = 3600; /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md pub const DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA: u64 = 18; +/// lightning-invoice will reject BOLT11 invoices that are longer than 7089 bytes. +/// +/// ### Rationale +/// +/// This value matches LND's implementation, which was chosen to be "the max number +/// of bytes that can fit in a QR code". LND's rationale is technically incorrect +/// as QR codes actually have a max capacity of 7089 _numeric_ characters and only +/// support up to 4296 all-uppercase alphanumeric characters. However, ecosystem-wide +/// consistency is more important. 
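+///
+/// ### Example
+///
+/// A sketch of the resulting behavior (illustrative only; the tests in `de.rs`
+/// match the exact error variant, which is checked loosely there since bech32's
+/// `ChecksumError` is non-exhaustive):
+///
+/// ```ignore
+/// use std::str::FromStr;
+/// use lightning_invoice::{Bolt11Invoice, MAX_LENGTH};
+///
+/// // "lnbc1" plus enough filler to land exactly one byte over the limit.
+/// let too_long = format!("lnbc1{}", "x".repeat(MAX_LENGTH + 1 - 5));
+/// assert_eq!(too_long.len(), MAX_LENGTH + 1);
+/// // Anything longer than MAX_LENGTH bytes is rejected.
+/// assert!(Bolt11Invoice::from_str(&too_long).is_err());
+/// ```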
+pub const MAX_LENGTH: usize = 7089;
+
+/// The [`bech32::Bech32`] checksum algorithm, with an extended max length suitable
+/// for BOLT11 invoices.
+pub enum Bolt11Bech32 {}
+
+impl Checksum for Bolt11Bech32 {
+	/// Extend the max length from the 1023 bytes default.
+	const CODE_LENGTH: usize = MAX_LENGTH;
+
+	// Inherit the other fields from `bech32::Bech32`.
+	type MidstateRepr = <bech32::Bech32 as Checksum>::MidstateRepr;
+	const CHECKSUM_LENGTH: usize = bech32::Bech32::CHECKSUM_LENGTH;
+	const GENERATOR_SH: [Self::MidstateRepr; 5] = bech32::Bech32::GENERATOR_SH;
+	const TARGET_RESIDUE: Self::MidstateRepr = bech32::Bech32::TARGET_RESIDUE;
+}
+
 /// Builder for [`Bolt11Invoice`]s. It's the most convenient and advised way to use this library. It
 /// ensures that only a semantically and syntactically correct invoice can be built using it.
 ///

From ff9f627aa7dfb20de70bef4f8897f4551279ce4f Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 17 Mar 2025 20:42:03 +0000
Subject: [PATCH 2/5] Re-export `lightning-invoice` as `bolt11-invoice` from
 `lightning`

Now that `lightning` depends on `lightning-invoice`, we should re-export it
like we do `bitcoin` and `types`.
---
 lightning/src/lib.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs
index fa9badf87fa..0041143dc79 100644
--- a/lightning/src/lib.rs
+++ b/lightning/src/lib.rs
@@ -54,6 +54,9 @@ extern crate alloc;
 
 pub extern crate lightning_types as types;
 
 pub extern crate bitcoin;
+
+pub extern crate lightning_invoice as bolt11_invoice;
+
 #[cfg(any(test, feature = "std"))]
 extern crate core;
 

From 74f9098e3f35f7e087e461a2ff73a88c5ae2c4df Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 2 Apr 2025 00:28:27 +0000
Subject: [PATCH 3/5] Make `TestChannelManager` `Sync`

In a coming commit we'll need to hold references to `TestChannelManager` in
threads, requiring that it be `Sync`.

Fairly minor merge conflicts addressed in:
 * `lightning/src/util/test_utils.rs`
---
 lightning-persister/src/test_utils.rs     |  2 +-
 lightning/src/ln/functional_test_utils.rs |  6 ++---
 lightning/src/util/test_utils.rs          | 33 ++++++++++++++++++-----
 3 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs
index e6ad42e5bcd..8af33cef55b 100644
--- a/lightning-persister/src/test_utils.rs
+++ b/lightning-persister/src/test_utils.rs
@@ -113,7 +113,7 @@ pub(crate) fn do_test_data_migration
 
 // Integration-test the given KVStore implementation. Test relaying a few payments and check that
 // the persisted data is updated the appropriate number of times.
-pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
+pub(crate) fn do_test_store<K: KVStore + Sync>(store_0: &K, store_1: &K) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let chain_mon_0 = test_utils::TestChainMonitor::new(
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 63341969326..f052cb9e965 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -10,7 +10,7 @@
 //! A bunch of useful utilities for building networks of nodes and exchanging messages between
 //! nodes for functional tests.
-use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist};
+use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::channelmonitor::ChannelMonitor;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
@@ -399,7 +399,7 @@ pub struct NodeCfg<'a> {
 	pub override_init_features: Rc<RefCell<Option<InitFeatures>>>,
 }
 
-type TestChannelManager<'node_cfg, 'chan_mon_cfg> = ChannelManager<
+pub(crate) type TestChannelManager<'node_cfg, 'chan_mon_cfg> = ChannelManager<
 	&'node_cfg TestChainMonitor<'chan_mon_cfg>,
 	&'chan_mon_cfg test_utils::TestBroadcaster,
 	&'node_cfg test_utils::TestKeysInterface,
@@ -3259,7 +3259,7 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>
 
-pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl Persist<TestChannelSigner>>) -> Vec<NodeCfg<'a>> {
+pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>, persisters: Vec<&'a impl test_utils::SyncPersist>) -> Vec<NodeCfg<'a>> {
 	let mut nodes = Vec::new();
 
 	for i in 0..node_count {
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 07b2b19b0d6..7095ac9c0cc 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -335,11 +335,29 @@ impl SignerProvider for OnlyReadsKeysInterface {
 	fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> { Err(()) }
 }
 
+#[cfg(feature = "std")]
+pub trait SyncBroadcaster: chaininterface::BroadcasterInterface + Sync {}
+#[cfg(feature = "std")]
+pub trait SyncPersist: chainmonitor::Persist<TestChannelSigner> + Sync {}
+#[cfg(feature = "std")]
+impl<T: chaininterface::BroadcasterInterface + Sync> SyncBroadcaster for T {}
+#[cfg(feature = "std")]
+impl<T: chainmonitor::Persist<TestChannelSigner> + Sync> SyncPersist for T {}
+
+#[cfg(not(feature = "std"))]
+pub trait SyncBroadcaster: chaininterface::BroadcasterInterface {}
+#[cfg(not(feature = "std"))]
+pub trait SyncPersist: chainmonitor::Persist<TestChannelSigner> {}
+#[cfg(not(feature = "std"))]
+impl<T: chaininterface::BroadcasterInterface> SyncBroadcaster for T {}
+#[cfg(not(feature = "std"))]
+impl<T: chainmonitor::Persist<TestChannelSigner>> SyncPersist for T {}
+
 pub struct TestChainMonitor<'a> {
 	pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
 	pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
 	pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, u64)>>,
-	pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a dyn chainmonitor::Persist<TestChannelSigner>>,
+	pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn SyncBroadcaster, &'a TestFeeEstimator, &'a TestLogger, &'a dyn SyncPersist>,
 	pub keys_manager: &'a TestKeysInterface,
 	/// If this is set to Some(), the next update_channel call (not watch_channel) must be a
 	/// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
@@ -350,7 +368,7 @@ pub struct TestChainMonitor<'a> {
 	pub expect_monitor_round_trip_fail: Mutex<Option<ChannelId>>,
 }
 impl<'a> TestChainMonitor<'a> {
-	pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist<TestChannelSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+	pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn SyncBroadcaster, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn SyncPersist, keys_manager: &'a TestKeysInterface) -> Self {
 		Self {
 			added_monitors: Mutex::new(Vec::new()),
 			monitor_updates: Mutex::new(new_hash_map()),
@@ -1448,18 +1466,19 @@ impl Drop for TestChainSource {
 
 pub struct TestScorer {
 	/// Stores a tuple of (scid, ChannelUsage)
-	scorer_expectations: RefCell<Option<VecDeque<(u64, ChannelUsage)>>>,
+	scorer_expectations: Mutex<Option<VecDeque<(u64, ChannelUsage)>>>,
 }
 
 impl TestScorer {
 	pub fn new() -> Self {
 		Self {
-			scorer_expectations: RefCell::new(None),
+			scorer_expectations: Mutex::new(None),
 		}
 	}
 
 	pub fn expect_usage(&self, scid: u64, expectation: ChannelUsage) {
-		self.scorer_expectations.borrow_mut().get_or_insert_with(|| VecDeque::new()).push_back((scid, expectation));
+		let mut expectations = self.scorer_expectations.lock().unwrap();
+		expectations.get_or_insert_with(|| VecDeque::new()).push_back((scid, expectation));
 	}
 }
 
@@ -1477,7 +1496,7 @@ impl ScoreLookUp for TestScorer {
 			Some(scid) => scid,
 			None => return 0,
 		};
-		if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() {
+		if let Some(scorer_expectations) = self.scorer_expectations.lock().unwrap().as_mut() {
 			match scorer_expectations.pop_front() {
 				Some((scid, expectation)) => {
 					assert_eq!(expectation, usage);
@@ -1511,7 +1530,7 @@ impl Drop for TestScorer {
 			return;
 		}
 
-		if let Some(scorer_expectations) = self.scorer_expectations.borrow().as_ref() {
+		if let Some(scorer_expectations) = self.scorer_expectations.lock().unwrap().as_ref() {
 			if !scorer_expectations.is_empty() {
 				panic!("Unsatisfied scorer expectations: {:?}", scorer_expectations)
 			}

From 1c50b9ef729b12007d6f4ef9695fc8b98683b5fb Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 21 Mar 2025 23:25:07 +0000
Subject: [PATCH 4/5] Use fair unlocking (via `parking_lot`) in tests

In a coming commit we'll add a test that relies heavily on lock fairness,
which is not provided by the default Rust `Mutex`. Luckily, `parking_lot`
provides an `unlock_fair`, which we use here, though it implies we have to
manually implement lock poisoning.

Trivial merge conflict resolved in `lightning/Cargo.toml`
---
 lightning-liquidity/Cargo.toml    |  1 +
 lightning/Cargo.toml              |  1 +
 lightning/src/lib.rs              |  2 +
 lightning/src/ln/monitor_tests.rs | 12 ++---
 lightning/src/sync/debug_sync.rs  | 85 ++++++++++++++++++++-----------
 5 files changed, 65 insertions(+), 36 deletions(-)

diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml
index ed229b8b69a..f6bebca3d15 100644
--- a/lightning-liquidity/Cargo.toml
+++ b/lightning-liquidity/Cargo.toml
@@ -38,6 +38,7 @@ lightning-background-processor = { version = "0.1.0", path = "../lightning-backg
 
 proptest = "1.0.0"
 tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] }
+parking_lot = { version = "0.12", default-features = false }
 
 [lints.rust.unexpected_cfgs]
 level = "forbid"
diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml
index 6417d231f9e..e4f3eed900b 100644
--- a/lightning/Cargo.toml
+++ b/lightning/Cargo.toml
@@ -51,6 +51,7 @@ libm = { version = "0.2", default-features = false }
 [dev-dependencies]
 regex = "1.5.6"
 lightning-types = { version = "0.2.0", path = "../lightning-types", features = ["_test_utils"] }
+parking_lot = { version = "0.12", default-features = false }
 
 [dev-dependencies.bitcoin]
 version = "0.32.2"
diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs
index 0041143dc79..4fa2871ddcb 100644
--- a/lightning/src/lib.rs
+++ b/lightning/src/lib.rs
@@ -66,6 +66,8 @@ extern crate core;
 
 #[cfg(ldk_bench)] extern crate criterion;
 
+#[cfg(all(feature = "std", test))] extern crate parking_lot;
+
 #[macro_use]
 pub mod util;
 pub mod chain;
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 7a20a79159a..92b19790be5 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -3300,10 +3300,10 @@ fn test_update_replay_panics() {
 
 	// Ensure applying the force-close update skipping the last normal update fails
 	let
poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // Then apply the last normal and force-close update and make sure applying the preimage // updates out-of-order fails. @@ -3311,17 +3311,17 @@ fn test_update_replay_panics() { monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // Make sure re-applying the force-close update fails let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // ...and finally ensure that applying all the updates succeeds. monitor.update_monitor(&updates[2], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs index f142328e45c..991a71ffbe0 100644 --- a/lightning/src/sync/debug_sync.rs +++ b/lightning/src/sync/debug_sync.rs @@ -5,15 +5,16 @@ use core::time::Duration; use std::cell::RefCell; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Condvar as StdCondvar; -use std::sync::Mutex as StdMutex; -use std::sync::MutexGuard as StdMutexGuard; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::RwLock as StdRwLock; use std::sync::RwLockReadGuard as StdRwLockReadGuard; use std::sync::RwLockWriteGuard as StdRwLockWriteGuard; -pub use std::sync::WaitTimeoutResult; +use parking_lot::Condvar as StdCondvar; +use parking_lot::Mutex as StdMutex; +use parking_lot::MutexGuard as StdMutexGuard; + +pub use parking_lot::WaitTimeoutResult; use crate::prelude::*; @@ -46,10 +47,9 @@ impl Condvar { &'a self, guard: MutexGuard<'a, T>, condition: F, ) -> LockResult> { let mutex: &'a Mutex = guard.mutex; - self.inner - .wait_while(guard.into_inner(), condition) - .map(|lock| MutexGuard { mutex, lock }) - .map_err(|_| ()) + let mut lock = guard.into_inner(); + self.inner.wait_while(&mut lock, condition); + Ok(MutexGuard { mutex, lock: Some(lock) }) } #[allow(unused)] @@ -57,10 +57,9 @@ impl Condvar { &'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F, ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { let mutex = guard.mutex; - self.inner - .wait_timeout_while(guard.into_inner(), dur, condition) - .map_err(|_| ()) - .map(|(lock, e)| (MutexGuard { mutex, lock }, e)) + let mut lock = guard.into_inner(); + let e = self.inner.wait_while_for(&mut lock, condition, dur); + Ok((MutexGuard { mutex, lock: Some(lock) }, e)) } pub fn notify_all(&self) { @@ -150,7 +149,7 @@ impl LockMetadata { LOCKS_INIT.call_once(|| unsafe { LOCKS = Some(StdMutex::new(new_hash_map())); }); - let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap(); + 
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock(); match locks.entry(lock_constr_location) { hash_map::Entry::Occupied(e) => { assert_eq!(lock_constr_colno, @@ -185,7 +184,7 @@ impl LockMetadata { } } for (_locked_idx, locked) in held.borrow().iter() { - for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() { + for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().iter() { let is_dep_this_lock = *locked_dep_idx == this.lock_idx; let has_same_construction = *locked_dep_idx == locked.lock_idx; if is_dep_this_lock && !has_same_construction { @@ -210,7 +209,7 @@ impl LockMetadata { } } // Insert any already-held locks in our locked-before set. - let mut locked_before = this.locked_before.lock().unwrap(); + let mut locked_before = this.locked_before.lock(); if !locked_before.contains_key(&locked.lock_idx) { let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() }; locked_before.insert(lockdep.lock.lock_idx, lockdep); @@ -237,7 +236,7 @@ impl LockMetadata { // Since a try-lock will simply fail if the lock is held already, we do not // consider try-locks to ever generate lockorder inversions. However, if a try-lock // succeeds, we do consider it to have created lockorder dependencies. - let mut locked_before = this.locked_before.lock().unwrap(); + let mut locked_before = this.locked_before.lock(); for (locked_idx, locked) in held.borrow().iter() { if !locked_before.contains_key(locked_idx) { let lockdep = @@ -252,11 +251,17 @@ impl LockMetadata { pub struct Mutex { inner: StdMutex, + poisoned: AtomicBool, deps: Arc, } + impl Mutex { pub(crate) fn into_inner(self) -> LockResult { - self.inner.into_inner().map_err(|_| ()) + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(self.inner.into_inner()) + } } } @@ -278,14 +283,14 @@ impl fmt::Debug for Mutex { #[must_use = "if unused the Mutex will immediately unlock"] pub struct MutexGuard<'a, T: Sized + 'a> { mutex: &'a Mutex, - lock: StdMutexGuard<'a, T>, + lock: Option>, } impl<'a, T: Sized> MutexGuard<'a, T> { fn into_inner(self) -> StdMutexGuard<'a, T> { // Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509. 
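+		// (`ptr::read` makes a bitwise copy of the inner guard, and the
+		// `mem::forget(self)` below keeps our own `Drop` impl from running, so
+		// exactly one live guard remains and the mutex is not unlocked twice.)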
unsafe { - let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock); + let v: StdMutexGuard<'a, T> = std::ptr::read(self.lock.as_ref().unwrap()); std::mem::forget(self); v } @@ -297,6 +302,10 @@ impl Drop for MutexGuard<'_, T> { LOCKS_HELD.with(|held| { held.borrow_mut().remove(&self.mutex.deps.lock_idx); }); + if std::thread::panicking() { + self.mutex.poisoned.store(true, Ordering::Release); + } + StdMutexGuard::unlock_fair(self.lock.take().unwrap()); } } @@ -304,37 +313,52 @@ impl Deref for MutexGuard<'_, T> { type Target = T; fn deref(&self) -> &T { - &self.lock.deref() + &self.lock.as_ref().unwrap().deref() } } impl DerefMut for MutexGuard<'_, T> { fn deref_mut(&mut self) -> &mut T { - self.lock.deref_mut() + self.lock.as_mut().unwrap().deref_mut() } } impl Mutex { pub fn new(inner: T) -> Mutex { - Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() } + Mutex { + inner: StdMutex::new(inner), + poisoned: AtomicBool::new(false), + deps: LockMetadata::new(), + } } pub fn lock<'a>(&'a self) -> LockResult> { LockMetadata::pre_lock(&self.deps, false); - self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()) + let lock = self.inner.lock(); + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(MutexGuard { mutex: self, lock: Some(lock) }) + } } pub fn try_lock<'a>(&'a self) -> LockResult> { - let res = - self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()); + let res = self.inner.try_lock().ok_or(()); if res.is_ok() { + if self.poisoned.load(Ordering::Acquire) { + return Err(()); + } LockMetadata::try_locked(&self.deps); } - res + res.map(|lock| MutexGuard { mutex: self, lock: Some(lock) }) } pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> { - self.inner.get_mut().map_err(|_| ()) + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(self.inner.get_mut()) + } } } @@ -345,9 +369,10 @@ impl<'a, T: 'a> LockTestExt<'a> for Mutex { } type ExclLock = MutexGuard<'a, T>; #[inline] - fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard { + fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> { LockMetadata::pre_lock(&self.deps, true); - self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap() + let lock = self.inner.lock(); + MutexGuard { mutex: self, lock: Some(lock) } } } From 5006c6b805871a6283a3358e0589510d72bbc1c9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 28 Mar 2025 21:56:17 +0000 Subject: [PATCH 5/5] Do not track HTLC IDs as separate MPP parts which need claiming When we claim an MPP payment, we need to track which channels have had the preimage durably added to their `ChannelMonitor` to ensure we don't remove the preimage from any `ChannelMonitor`s until all `ChannelMonitor`s have the preimage. Previously, we tracked each MPP part, down to the HTLC ID, as a part which we needed to get the preimage on disk for. However, this is not necessary - once a `ChannelMonitor` has a preimage, it applies it to all inbound HTLCs with the same payment hash. 
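As an illustration of the granularity change (a minimal standalone sketch, not
LDK API - it simply mirrors the `sort_unstable()`/`dedup()` handling of
`channels_without_preimage` later in this patch): three MPP parts, two of which
arrived over the same channel, leave only two channels to wait on.

    // Hypothetical (channel_id, htlc_id) pairs for the parts of one payment.
    let parts: Vec<(u8, u8)> = vec![(1, 0), (1, 1), (2, 0)];
    // Old tracking: one entry per HTLC (3 entries, one channel duplicated).
    // New tracking: one entry per channel.
    let mut channels: Vec<u8> = parts.iter().map(|(chan, _htlc)| *chan).collect();
    channels.sort_unstable();
    channels.dedup();
    assert_eq!(channels, vec![1, 2]);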
Further, this can cause a channel to wait on itself in cases of high-latency
synchronous persistence -
 * If we receive an MPP payment for which multiple parts came to us over the
   same channel,
 * and claim the MPP payment, creating a `ChannelMonitorUpdate` for the first
   part but enqueueing the remaining HTLC claim(s) in the channel's holding
   cell,
 * and we receive a `revoke_and_ack` for the same channel before the
   `ChannelManager::claim_payment` method completes (as each claim waits for
   the `ChannelMonitorUpdate` persistence),
 * we will cause the `ChannelMonitorUpdate` for that `revoke_and_ack` to go
   into the blocked set, waiting on the MPP parts to be fully claimed,
 * but when `claim_payment` goes to add the next `ChannelMonitorUpdate` for the
   MPP claim, it will be placed in the blocked set, since the blocked set is
   non-empty.

Thus, we'll end up with a `ChannelMonitorUpdate` in the blocked set which is
needed to unblock the channel since it is a part of the MPP set which blocked
the channel.

Trivial conflicts resolved in `lightning/src/util/test_utils.rs`
---
 lightning/src/ln/chanmon_update_fail_tests.rs | 222 ++++++++++++++++++
 lightning/src/ln/channelmanager.rs            |  60 +++--
 lightning/src/ln/functional_test_utils.rs     |  20 ++
 lightning/src/util/test_utils.rs              |  14 ++
 4 files changed, 290 insertions(+), 26 deletions(-)

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 2d01ece1158..ad1e6c26b98 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -3819,3 +3819,225 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
 	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
 	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
 }
+
+#[test]
+#[cfg(all(feature = "std", not(target_os = "windows")))]
+fn test_single_channel_multiple_mpp() {
+	use std::sync::atomic::{AtomicBool, Ordering};
+
+	// Test what happens when we attempt to claim an MPP with many parts that came to us through
+	// the same channel with a synchronous persistence interface which has very high latency.
+	//
+	// Previously, if a `revoke_and_ack` came in while we were still running in
+	// `ChannelManager::claim_payment` we'd end up hanging waiting to apply a
+	// `ChannelMonitorUpdate` until after it completed. See the commit which introduced this test
+	// for more info.
+	let chanmon_cfgs = create_chanmon_cfgs(9);
+	let node_cfgs = create_node_cfgs(9, &chanmon_cfgs);
+	let configs = [None, None, None, None, None, None, None, None, None];
+	let node_chanmgrs = create_node_chanmgrs(9, &node_cfgs, &configs);
+	let mut nodes = create_network(9, &node_cfgs, &node_chanmgrs);
+
+	let node_7_id = nodes[7].node.get_our_node_id();
+	let node_8_id = nodes[8].node.get_our_node_id();
+
+	// Send an MPP payment in six parts along the path shown from top to bottom
+	//        0
+	//  1 2 3 4 5 6
+	//        7
+	//        8
+	//
+	// We can in theory reproduce this issue with fewer channels/HTLCs, but getting this test
+	// robust is rather challenging. We rely on having the main test thread wait on locks held in
+	// the background `claim_funds` thread and unlocking when the `claim_funds` thread completes a
+	// single `ChannelMonitorUpdate`.
+	// This thread calls `get_and_clear_pending_msg_events()` and `handle_revoke_and_ack()`, both
+	// of which require `ChannelManager` locks, but we have to make sure this thread gets a chance
+	// to be blocked on the mutexes before we let the background thread wake `claim_funds` so that
+	// the mutex can switch to this main thread.
+	// This relies on our locks being fair, but also on our threads getting runtime during the test
+	// run, which can be pretty competitive. Thus we do a dumb dance to be as conservative as
+	// possible - we have a background thread which completes a `ChannelMonitorUpdate` (by sending
+	// into the `write_blocker` mpsc) but it doesn't run until an mpsc channel sends from this main
+	// thread to the background thread, and then we let it sleep a while before we send the
+	// `ChannelMonitorUpdate` unblocker.
+	// Further, we give ourselves two chances each time, needing 4 HTLCs just to unlock our two
+	// `ChannelManager` calls. We then need a few remaining HTLCs to actually trigger the bug, so
+	// we use 6 HTLCs.
+	// Finally, we do not run this test on Winblowz because it, somehow, in 2025, does not implement
+	// actual preemptive multitasking and thinks that cooperative multitasking somehow is
+	// acceptable in the 21st century, let alone a quarter of the way into it.
+	const MAX_THREAD_INIT_TIME: std::time::Duration = std::time::Duration::from_secs(1);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 3, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 4, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 5, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 6, 100_000, 0);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 3, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 4, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 5, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 6, 7, 100_000, 0);
+	create_announced_chan_between_nodes_with_value(&nodes, 7, 8, 1_000_000, 0);
+
+	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[8], 50_000_000);
+
+	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[7], &nodes[8]], &[&nodes[2], &nodes[7], &nodes[8]], &[&nodes[3], &nodes[7], &nodes[8]], &[&nodes[4], &nodes[7], &nodes[8]], &[&nodes[5], &nodes[7], &nodes[8]], &[&nodes[6], &nodes[7], &nodes[8]]], 50_000_000, payment_hash, payment_secret);
+
+	let (do_a_write, blocker) = std::sync::mpsc::sync_channel(0);
+	*nodes[8].chain_monitor.write_blocker.lock().unwrap() = Some(blocker);
+
+	// Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }.
+	// We do this by casting a pointer to a `TestChannelManager` to a pointer to a
+	// `TestChannelManager` with different (in this case 'static) lifetime.
+	// This is even suggested in the second example at
+	// https://doc.rust-lang.org/std/mem/fn.transmute.html#examples
+	let claim_node: &'static TestChannelManager<'static, 'static> =
+		unsafe { std::mem::transmute(nodes[8].node as &TestChannelManager) };
+	let thrd = std::thread::spawn(move || {
+		// Initiate the claim in a background thread as it will immediately block waiting on the
+		// `write_blocker` we set above.
+		claim_node.claim_funds(payment_preimage);
+	});
+
+	// First unlock one monitor so that we have a pending
+	// `update_fulfill_htlc`/`commitment_signed` pair to pass to our counterparty.
+	do_a_write.send(()).unwrap();
+
+	// Then fetch the `update_fulfill_htlc`/`commitment_signed`. Note that the
+	// `get_and_clear_pending_msg_events` will immediately hang trying to take a peer lock which
+	// `claim_funds` is holding. Thus, we release a second write after a small sleep in the
+	// background to give `claim_funds` a chance to step forward, unblocking
+	// `get_and_clear_pending_msg_events`.
+	let do_a_write_background = do_a_write.clone();
+	let block_thrd2 = AtomicBool::new(true);
+	let block_thrd2_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd2) };
+	let thrd2 = std::thread::spawn(move || {
+		while block_thrd2_read.load(Ordering::Acquire) {
+			std::thread::yield_now();
+		}
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+		std::thread::sleep(MAX_THREAD_INIT_TIME);
+		do_a_write_background.send(()).unwrap();
+	});
+	block_thrd2.store(false, Ordering::Release);
+	let first_updates = get_htlc_update_msgs(&nodes[8], &nodes[7].node.get_our_node_id());
+	thrd2.join().unwrap();
+
+	// Disconnect node 7 from all its peers so it doesn't bother to fail the HTLCs back
+	nodes[7].node.peer_disconnected(nodes[1].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[2].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[3].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[4].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[5].node.get_our_node_id());
+	nodes[7].node.peer_disconnected(nodes[6].node.get_our_node_id());
+
+	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &first_updates.update_fulfill_htlcs[0]);
+	check_added_monitors(&nodes[7], 1);
+	expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false);
+	nodes[7].node.handle_commitment_signed(node_8_id, &first_updates.commitment_signed);
+	check_added_monitors(&nodes[7], 1);
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id);
+
+	// Now, handle the `revoke_and_ack` from node 7. Note that `claim_funds` is still blocked on
+	// our peer lock, so we have to release a write to let it process.
+	// Before this fix, once this call completed, the channel would be locked up and unable
+	// to make further progress.
+ let do_a_write_background = do_a_write.clone(); + let block_thrd3 = AtomicBool::new(true); + let block_thrd3_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd3) }; + let thrd3 = std::thread::spawn(move || { + while block_thrd3_read.load(Ordering::Acquire) { + std::thread::yield_now(); + } + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + }); + block_thrd3.store(false, Ordering::Release); + nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + thrd3.join().unwrap(); + assert!(!thrd.is_finished()); + + let thrd4 = std::thread::spawn(move || { + do_a_write.send(()).unwrap(); + do_a_write.send(()).unwrap(); + }); + + thrd4.join().unwrap(); + thrd.join().unwrap(); + + expect_payment_claimed!(nodes[8], payment_hash, 50_000_000); + + // At the end, we should have 7 ChannelMonitorUpdates - 6 for HTLC claims, and one for the + // above `revoke_and_ack`. + check_added_monitors(&nodes[8], 7); + + // Now drive everything to the end, at least as far as node 7 is concerned... + *nodes[8].chain_monitor.write_blocker.lock().unwrap() = None; + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 1); + + let (updates, raa) = get_updates_and_revoke(&nodes[8], &nodes[7].node.get_our_node_id()); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[7], nodes[2], nodes[8], Some(1000), false, false); + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + expect_payment_forwarded!(nodes[7], nodes[3], nodes[8], Some(1000), false, false); + let mut next_source = 4; + if let Some(update) = updates.update_fulfill_htlcs.get(2) { + nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + expect_payment_forwarded!(nodes[7], nodes[4], nodes[8], Some(1000), false, false); + next_source += 1; + } + + nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + if updates.update_fulfill_htlcs.get(2).is_some() { + check_added_monitors(&nodes[7], 5); + } else { + check_added_monitors(&nodes[7], 4); + } + + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + + nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 2); + + let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + next_source += 1; + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + next_source += 1; + if let Some(update) = updates.update_fulfill_htlcs.get(2) { + nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + } + + nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + if updates.update_fulfill_htlcs.get(2).is_some() { + check_added_monitors(&nodes[7], 5); + } else { + check_added_monitors(&nodes[7], 4); + } + + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + 
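+	// Final round: deliver node 7's RAA and commitment_signed to node 8, then node 8's last
+	// RAA back to node 7.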
nodes[8].node.handle_revoke_and_ack(node_7_id, &raa);
+	nodes[8].node.handle_commitment_signed(node_7_id, &cs);
+	check_added_monitors(&nodes[8], 2);
+
+	let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_7_id);
+	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
+	check_added_monitors(&nodes[7], 1);
+}
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 9ede93c93d1..14d3c0ea5cb 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1105,7 +1105,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
 		/// A pending MPP claim which hasn't yet completed.
 		///
 		/// Not written to disk.
-		pending_mpp_claim: Option<(PublicKey, ChannelId, u64, PendingMPPClaimPointer)>,
+		pending_mpp_claim: Option<(PublicKey, ChannelId, PendingMPPClaimPointer)>,
 	},
 	/// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
 	/// operation of another channel.
@@ -1207,10 +1207,16 @@ impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
 	}
 }
 
+#[derive(Debug)]
+pub(crate) struct PendingMPPClaim {
+	channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
+	channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
+}
+
 #[derive(Clone, Debug, Hash, PartialEq, Eq)]
 /// The source of an HTLC which is being claimed as a part of an incoming payment. Each part is
-/// tracked in [`PendingMPPClaim`] as well as in [`ChannelMonitor`]s, so that it can be converted
-/// to an [`HTLCClaimSource`] for claim replays on startup.
+/// tracked in [`ChannelMonitor`]s, so that it can be converted to an [`HTLCClaimSource`] for claim
+/// replays on startup.
 struct MPPClaimHTLCSource {
 	counterparty_node_id: PublicKey,
 	funding_txo: OutPoint,
@@ -1225,12 +1231,6 @@ impl_writeable_tlv_based!(MPPClaimHTLCSource, {
 	(6, htlc_id, required),
 });
 
-#[derive(Debug)]
-pub(crate) struct PendingMPPClaim {
-	channels_without_preimage: Vec<MPPClaimHTLCSource>,
-	channels_with_preimage: Vec<MPPClaimHTLCSource>,
-}
-
 #[derive(Clone, Debug, PartialEq, Eq)]
 /// When we're claiming a(n MPP) payment, we want to store information about that payment in the
 /// [`ChannelMonitor`] so that we can replay the claim without any information from the
@@ -7017,8 +7017,15 @@ where
 			}
 		}).collect();
 		let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
+			let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len());
+			for part in mpp_parts.iter() {
+				let chan = (part.counterparty_node_id, part.funding_txo, part.channel_id);
+				if !channels_without_preimage.contains(&chan) {
+					channels_without_preimage.push(chan);
+				}
+			}
 			Some(Arc::new(Mutex::new(PendingMPPClaim {
-				channels_without_preimage: mpp_parts.clone(),
+				channels_without_preimage,
 				channels_with_preimage: Vec::new(),
 			})))
 		} else {
@@ -7029,7 +7036,7 @@ where
 			let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
 				if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
 					let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
-					Some((cp_id, htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id, claim_ptr))
+					Some((cp_id, htlc.prev_hop.channel_id, claim_ptr))
 				} else {
 					None
 				}
@@ -7375,7 +7382,7 @@ This indicates a bug inside LDK.
Please report this error at https://github.com/
 		for action in actions.into_iter() {
 			match action {
 				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
-					if let Some((counterparty_node_id, chan_id, htlc_id, claim_ptr)) = pending_mpp_claim {
+					if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim {
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
 							let mut peer_state = peer_state_mutex.lock().unwrap();
@@ -7386,24 +7393,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 								if *pending_claim == claim_ptr {
 									let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
 									let pending_claim_state = &mut *pending_claim_state_lock;
-									pending_claim_state.channels_without_preimage.retain(|htlc_info| {
+									pending_claim_state.channels_without_preimage.retain(|(cp, op, cid)| {
 										let this_claim =
-											htlc_info.counterparty_node_id == counterparty_node_id
-												&& htlc_info.channel_id == chan_id
-												&& htlc_info.htlc_id == htlc_id;
+											*cp == counterparty_node_id && *cid == chan_id;
 										if this_claim {
-											pending_claim_state.channels_with_preimage.push(htlc_info.clone());
+											pending_claim_state.channels_with_preimage.push((*cp, *op, *cid));
 											false
 										} else { true }
 									});
 									if pending_claim_state.channels_without_preimage.is_empty() {
-										for htlc_info in pending_claim_state.channels_with_preimage.iter() {
-											let freed_chan = (
-												htlc_info.counterparty_node_id,
-												htlc_info.funding_txo,
-												htlc_info.channel_id,
-												blocker.clone()
-											);
+										for (cp, op, cid) in pending_claim_state.channels_with_preimage.iter() {
+											let freed_chan = (*cp, *op, *cid, blocker.clone());
 											freed_channels.push(freed_chan);
 										}
 									}
@@ -14232,8 +14232,16 @@ where
 			if payment_claim.mpp_parts.is_empty() {
 				return Err(DecodeError::InvalidValue);
 			}
+			let mut channels_without_preimage = payment_claim.mpp_parts.iter()
+				.map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id))
+				.collect::<Vec<_>>();
+			// If we have multiple MPP parts which were received over the same channel,
+			// we only track it once since, once we get a preimage durably into the
+			// `ChannelMonitor`, it will be used for all HTLCs with a matching hash.
+			channels_without_preimage.sort_unstable();
+			channels_without_preimage.dedup();
 			let pending_claims = PendingMPPClaim {
-				channels_without_preimage: payment_claim.mpp_parts.clone(),
+				channels_without_preimage,
 				channels_with_preimage: Vec::new(),
 			};
 			let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
@@ -14266,7 +14274,7 @@ where
 				for part in payment_claim.mpp_parts.iter() {
 					let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| (
-						part.counterparty_node_id, part.channel_id, part.htlc_id,
+						part.counterparty_node_id, part.channel_id,
 						PendingMPPClaimPointer(Arc::clone(&ptr))
 					));
 					let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr|
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index f052cb9e965..be77547b79c 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -779,6 +779,26 @@ pub fn get_revoke_commit_msgs<CM: AChannelManager, H: NodeHolder<CM = CM>>(node: &
 	})
 }
 
+/// Gets an `UpdateHTLCs` and `revoke_and_ack` (i.e. after we get a responding `commitment_signed`
+/// while we have updates in the holding cell).
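+///
+/// Illustrative usage (mirroring the new MPP test in this patch; assumes exactly
+/// one pending `UpdateHTLCs` followed by one `SendRevokeAndACK` is queued for
+/// `recipient`):
+///
+/// ```ignore
+/// let (updates, raa) = get_updates_and_revoke(&nodes[8], &nodes[7].node.get_our_node_id());
+/// ```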
+pub fn get_updates_and_revoke<CM: AChannelManager, H: NodeHolder<CM = CM>>(node: &H, recipient: &PublicKey) -> (msgs::CommitmentUpdate, msgs::RevokeAndACK) {
+	let events = node.node().get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 2);
+	(match events[0] {
+		MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+			assert_eq!(node_id, recipient);
+			(*updates).clone()
+		},
+		_ => panic!("Unexpected event"),
+	}, match events[1] {
+		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+			assert_eq!(node_id, recipient);
+			(*msg).clone()
+		},
+		_ => panic!("Unexpected event"),
+	})
+}
+
 #[macro_export]
 /// Gets an RAA and CS which were sent in response to a commitment update
 ///
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 7095ac9c0cc..5bd5acaf176 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -366,6 +366,8 @@ pub struct TestChainMonitor<'a> {
 	/// If this is set to Some(), the next round trip serialization check will not hold after an
 	/// update_channel call (not watch_channel) for the given channel_id.
 	pub expect_monitor_round_trip_fail: Mutex<Option<ChannelId>>,
+	#[cfg(feature = "std")]
+	pub write_blocker: Mutex<Option<std::sync::mpsc::Receiver<()>>>,
 }
 impl<'a> TestChainMonitor<'a> {
 	pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn SyncBroadcaster, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn SyncPersist, keys_manager: &'a TestKeysInterface) -> Self {
@@ -377,6 +379,8 @@ impl<'a> TestChainMonitor<'a> {
 			keys_manager,
 			expect_channel_force_closed: Mutex::new(None),
 			expect_monitor_round_trip_fail: Mutex::new(None),
+			#[cfg(feature = "std")]
+			write_blocker: Mutex::new(None),
 		}
 	}
 
@@ -387,6 +391,11 @@ impl<'a> TestChainMonitor<'a> {
 	}
 }
 impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
+		#[cfg(feature = "std")]
+		if let Some(blocker) = &*self.write_blocker.lock().unwrap() {
+			blocker.recv().unwrap();
+		}
+
 		// At every point where we get a monitor update, we should be able to send a useful monitor
 		// to a watchtower and disk...
 		let mut w = TestVecWriter(Vec::new());
@@ -401,6 +410,11 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 	}
 
 	fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
+		#[cfg(feature = "std")]
+		if let Some(blocker) = &*self.write_blocker.lock().unwrap() {
+			blocker.recv().unwrap();
+		}
+
 		// Every monitor update should survive roundtrip
 		let mut w = TestVecWriter(Vec::new());
 		update.write(&mut w).unwrap();
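The `write_blocker` gating added above is a plain rendezvous over a zero-capacity
channel: with `std::sync::mpsc::sync_channel(0)`, every `send` blocks until the
monitor-persistence path performs the matching `recv`, which is what lets the new
MPP test single-step `claim_funds` one `ChannelMonitorUpdate` at a time. A minimal
standalone sketch of the pattern (names are illustrative, not LDK API):

    use std::sync::mpsc::sync_channel;
    use std::thread;

    fn main() {
        // Zero capacity: send() blocks until the matching recv() runs, so the
        // "test thread" below controls exactly when the "persist" step proceeds.
        let (do_a_write, blocker) = sync_channel::<()>(0);

        let worker = thread::spawn(move || {
            // Stand-in for update_channel(): wait for permission to persist.
            blocker.recv().unwrap();
            // ... persist the monitor update here ...
        });

        // Stand-in for the test thread: unblock exactly one pending write.
        do_a_write.send(()).unwrap();
        worker.join().unwrap();
    }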