From 4abe3d86d1b1bf45ae69744fbb6b32aa925643fe Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 9 Jun 2025 11:28:11 +0200 Subject: [PATCH 001/184] Update `CHANGELOG` for v0.6.0 --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 931b98f28..29b6f748c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,31 @@ +# 0.6.0 - Jun. 9, 2025 +This sixth minor release mainly fixes an issue that could have left the +on-chain wallet unable to spend funds if transactions that had previously been +accepted to the mempool ended up being evicted. + +## Feature and API updates +- Onchain addresses are now validated against the expected network before use (#519). +- The API methods on the `Bolt11Invoice` type are now exposed in bindings (#522). +- The `UnifiedQrPayment::receive` flow no longer aborts if we're unable to generate a BOLT12 offer (#548). + +## Bug Fixes and Improvements +- Previously, the node could potentially enter a state that would have left the + onchain wallet unable spend any funds if previously-generated transactions + had been first accepted, and then evicted from the mempool. This has been + fixed in BDK 2.0.0, to which we upgrade as part of this release. (#551) +- A bug that had us fail `OnchainPayment::send_all` in the `retrain_reserves` + mode when requiring sub-dust-limit anchor reserves has been fixed (#540). +- The output of the `log` facade logger has been corrected (#547). + +## Compatibility Notes +- The BDK dependency has been bumped to `bdk_wallet` v2.0 (#551). + +In total, this release features 20 files changed, 1188 insertions, 447 deletions, in 18 commits from 3 authors in alphabetical order: + +- alexanderwiederin +- Camillarhi +- Elias Rohrer + # 0.5.0 - Apr. 
29, 2025 Besides numerous API improvements and bugfixes this fifth minor release notably adds support for sourcing chain and fee rate data from an Electrum backend, requesting channels via the [bLIP-51 / LSPS1](https://github.com/lightning/blips/blob/master/blip-0051.md) protocol, as well as experimental support for operating as a [bLIP-52 / LSPS2](https://github.com/lightning/blips/blob/master/blip-0052.md) service. From 35524cb0bc388173e2b697a049d65db0ccac8c49 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 9 Jun 2025 11:31:39 +0200 Subject: [PATCH 002/184] Bump version number post v0.6.0 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 8c91e9b79..bf8bed08c 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ldk-node" -version = "0.6.0+git" +version = "0.7.0+git" authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" From a6697b8559333cec6caa142d47f8b5b0ad5ae707 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 9 Jun 2025 14:41:13 +0200 Subject: [PATCH 003/184] Bump version number for bindings --- Package.swift | 2 +- bindings/kotlin/ldk-node-android/gradle.properties | 2 +- bindings/kotlin/ldk-node-jvm/gradle.properties | 2 +- bindings/python/pyproject.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Package.swift b/Package.swift index 61da2d5e7..f6bad5e3c 100644 --- a/Package.swift +++ b/Package.swift @@ -3,7 +3,7 @@ import PackageDescription -let tag = "v0.5.0" +let tag = "v0.6.0" let checksum = "fd9eb84a478402af8f790519a463b6e1bf6ab3987f5951cd8375afb9d39e7a4b" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" diff --git a/bindings/kotlin/ldk-node-android/gradle.properties b/bindings/kotlin/ldk-node-android/gradle.properties index 1976b7123..578c3308b 100644 --- a/bindings/kotlin/ldk-node-android/gradle.properties +++ 
b/bindings/kotlin/ldk-node-android/gradle.properties @@ -2,4 +2,4 @@ org.gradle.jvmargs=-Xmx1536m android.useAndroidX=true android.enableJetifier=true kotlin.code.style=official -libraryVersion=0.5.0 +libraryVersion=0.6.0 diff --git a/bindings/kotlin/ldk-node-jvm/gradle.properties b/bindings/kotlin/ldk-node-jvm/gradle.properties index 62d660235..913b5caea 100644 --- a/bindings/kotlin/ldk-node-jvm/gradle.properties +++ b/bindings/kotlin/ldk-node-jvm/gradle.properties @@ -1,3 +1,3 @@ org.gradle.jvmargs=-Xmx1536m kotlin.code.style=official -libraryVersion=0.5.0 +libraryVersion=0.6.0 diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index 8af86d2bb..496781a6a 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ldk_node" -version = "0.5.0" +version = "0.6.0" authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] From 415941bb696aea6d644d7ef00e73fe7f877ae3c7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 9 Jun 2025 14:51:29 +0200 Subject: [PATCH 004/184] Update Swift files for 0.6.0 --- Package.swift | 2 +- bindings/swift/Sources/LDKNode/LDKNode.swift | 612 ++++++++++++++++--- 2 files changed, 539 insertions(+), 75 deletions(-) diff --git a/Package.swift b/Package.swift index f6bad5e3c..33c5a70b8 100644 --- a/Package.swift +++ b/Package.swift @@ -4,7 +4,7 @@ import PackageDescription let tag = "v0.6.0" -let checksum = "fd9eb84a478402af8f790519a463b6e1bf6ab3987f5951cd8375afb9d39e7a4b" +let checksum = "8bda396624134e0b592bfcc2f977b9aa5ce8c2ee359c032ae3520869ece8851c" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( diff --git a/bindings/swift/Sources/LDKNode/LDKNode.swift b/bindings/swift/Sources/LDKNode/LDKNode.swift index de5df5e00..442201d31 100644 --- a/bindings/swift/Sources/LDKNode/LDKNode.swift +++ b/bindings/swift/Sources/LDKNode/LDKNode.swift @@ -511,6 +511,252 @@ fileprivate 
struct FfiConverterData: FfiConverterRustBuffer { +public protocol Bolt11InvoiceProtocol : AnyObject { + + func amountMilliSatoshis() -> UInt64? + + func currency() -> Currency + + func description() -> Bolt11InvoiceDescription + + func expiryTimeSeconds() -> UInt64 + + func fallbackAddresses() -> [Address] + + func isExpired() -> Bool + + func minFinalCltvExpiryDelta() -> UInt64 + + func network() -> Network + + func paymentHash() -> PaymentHash + + func paymentSecret() -> PaymentSecret + + func recoverPayeePubKey() -> PublicKey + + func routeHints() -> [[RouteHintHop]] + + func secondsSinceEpoch() -> UInt64 + + func secondsUntilExpiry() -> UInt64 + + func signableHash() -> [UInt8] + + func wouldExpire(atTimeSeconds: UInt64) -> Bool + +} + +open class Bolt11Invoice: + Bolt11InvoiceProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! 
rustCall { uniffi_ldk_node_fn_clone_bolt11invoice(self.pointer, $0) } + } + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_bolt11invoice(pointer, $0) } + } + + +public static func fromStr(invoiceStr: String)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_constructor_bolt11invoice_from_str( + FfiConverterString.lower(invoiceStr),$0 + ) +}) +} + + + +open func amountMilliSatoshis() -> UInt64? { + return try! FfiConverterOptionUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_amount_milli_satoshis(self.uniffiClonePointer(),$0 + ) +}) +} + +open func currency() -> Currency { + return try! FfiConverterTypeCurrency.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_currency(self.uniffiClonePointer(),$0 + ) +}) +} + +open func description() -> Bolt11InvoiceDescription { + return try! FfiConverterTypeBolt11InvoiceDescription.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_description(self.uniffiClonePointer(),$0 + ) +}) +} + +open func expiryTimeSeconds() -> UInt64 { + return try! FfiConverterUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_expiry_time_seconds(self.uniffiClonePointer(),$0 + ) +}) +} + +open func fallbackAddresses() -> [Address] { + return try! FfiConverterSequenceTypeAddress.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_fallback_addresses(self.uniffiClonePointer(),$0 + ) +}) +} + +open func isExpired() -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_is_expired(self.uniffiClonePointer(),$0 + ) +}) +} + +open func minFinalCltvExpiryDelta() -> UInt64 { + return try! FfiConverterUInt64.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_min_final_cltv_expiry_delta(self.uniffiClonePointer(),$0 + ) +}) +} + +open func network() -> Network { + return try! FfiConverterTypeNetwork.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_network(self.uniffiClonePointer(),$0 + ) +}) +} + +open func paymentHash() -> PaymentHash { + return try! FfiConverterTypePaymentHash.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_payment_hash(self.uniffiClonePointer(),$0 + ) +}) +} + +open func paymentSecret() -> PaymentSecret { + return try! FfiConverterTypePaymentSecret.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_payment_secret(self.uniffiClonePointer(),$0 + ) +}) +} + +open func recoverPayeePubKey() -> PublicKey { + return try! FfiConverterTypePublicKey.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_recover_payee_pub_key(self.uniffiClonePointer(),$0 + ) +}) +} + +open func routeHints() -> [[RouteHintHop]] { + return try! FfiConverterSequenceSequenceTypeRouteHintHop.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_route_hints(self.uniffiClonePointer(),$0 + ) +}) +} + +open func secondsSinceEpoch() -> UInt64 { + return try! FfiConverterUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_seconds_since_epoch(self.uniffiClonePointer(),$0 + ) +}) +} + +open func secondsUntilExpiry() -> UInt64 { + return try! FfiConverterUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_seconds_until_expiry(self.uniffiClonePointer(),$0 + ) +}) +} + +open func signableHash() -> [UInt8] { + return try! FfiConverterSequenceUInt8.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_signable_hash(self.uniffiClonePointer(),$0 + ) +}) +} + +open func wouldExpire(atTimeSeconds: UInt64) -> Bool { + return try! FfiConverterBool.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_would_expire(self.uniffiClonePointer(), + FfiConverterUInt64.lower(atTimeSeconds),$0 + ) +}) +} + + +} + +public struct FfiConverterTypeBolt11Invoice: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = Bolt11Invoice + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt11Invoice { + return Bolt11Invoice(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: Bolt11Invoice) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt11Invoice { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: Bolt11Invoice, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. 
+ writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +public func FfiConverterTypeBolt11Invoice_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(pointer) +} + +public func FfiConverterTypeBolt11Invoice_lower(_ value: Bolt11Invoice) -> UnsafeMutableRawPointer { + return FfiConverterTypeBolt11Invoice.lower(value) +} + + + + public protocol Bolt11PaymentProtocol : AnyObject { func claimForHash(paymentHash: PaymentHash, claimableAmountMsat: UInt64, preimage: PaymentPreimage) throws @@ -2990,36 +3236,6 @@ public struct Bolt11PaymentInfo { -extension Bolt11PaymentInfo: Equatable, Hashable { - public static func ==(lhs: Bolt11PaymentInfo, rhs: Bolt11PaymentInfo) -> Bool { - if lhs.state != rhs.state { - return false - } - if lhs.expiresAt != rhs.expiresAt { - return false - } - if lhs.feeTotalSat != rhs.feeTotalSat { - return false - } - if lhs.orderTotalSat != rhs.orderTotalSat { - return false - } - if lhs.invoice != rhs.invoice { - return false - } - return true - } - - public func hash(into hasher: inout Hasher) { - hasher.combine(state) - hasher.combine(expiresAt) - hasher.combine(feeTotalSat) - hasher.combine(orderTotalSat) - hasher.combine(invoice) - } -} - - public struct FfiConverterTypeBolt11PaymentInfo: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt11PaymentInfo { return @@ -4883,6 +5099,95 @@ public func FfiConverterTypePeerDetails_lower(_ value: PeerDetails) -> RustBuffe } +public struct RouteHintHop { + public var srcNodeId: PublicKey + public var shortChannelId: UInt64 + public var cltvExpiryDelta: UInt16 + public var htlcMinimumMsat: UInt64? + public var htlcMaximumMsat: UInt64? + public var fees: RoutingFees + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(srcNodeId: PublicKey, shortChannelId: UInt64, cltvExpiryDelta: UInt16, htlcMinimumMsat: UInt64?, htlcMaximumMsat: UInt64?, fees: RoutingFees) { + self.srcNodeId = srcNodeId + self.shortChannelId = shortChannelId + self.cltvExpiryDelta = cltvExpiryDelta + self.htlcMinimumMsat = htlcMinimumMsat + self.htlcMaximumMsat = htlcMaximumMsat + self.fees = fees + } +} + + + +extension RouteHintHop: Equatable, Hashable { + public static func ==(lhs: RouteHintHop, rhs: RouteHintHop) -> Bool { + if lhs.srcNodeId != rhs.srcNodeId { + return false + } + if lhs.shortChannelId != rhs.shortChannelId { + return false + } + if lhs.cltvExpiryDelta != rhs.cltvExpiryDelta { + return false + } + if lhs.htlcMinimumMsat != rhs.htlcMinimumMsat { + return false + } + if lhs.htlcMaximumMsat != rhs.htlcMaximumMsat { + return false + } + if lhs.fees != rhs.fees { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(srcNodeId) + hasher.combine(shortChannelId) + hasher.combine(cltvExpiryDelta) + hasher.combine(htlcMinimumMsat) + hasher.combine(htlcMaximumMsat) + hasher.combine(fees) + } +} + + +public struct FfiConverterTypeRouteHintHop: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RouteHintHop { + return + try RouteHintHop( + srcNodeId: FfiConverterTypePublicKey.read(from: &buf), + shortChannelId: FfiConverterUInt64.read(from: &buf), + cltvExpiryDelta: FfiConverterUInt16.read(from: &buf), + htlcMinimumMsat: FfiConverterOptionUInt64.read(from: &buf), + htlcMaximumMsat: FfiConverterOptionUInt64.read(from: &buf), + fees: FfiConverterTypeRoutingFees.read(from: &buf) + ) + } + + public static func write(_ value: RouteHintHop, into buf: inout [UInt8]) { + FfiConverterTypePublicKey.write(value.srcNodeId, into: &buf) + FfiConverterUInt64.write(value.shortChannelId, into: &buf) + FfiConverterUInt16.write(value.cltvExpiryDelta, into: &buf) + 
FfiConverterOptionUInt64.write(value.htlcMinimumMsat, into: &buf) + FfiConverterOptionUInt64.write(value.htlcMaximumMsat, into: &buf) + FfiConverterTypeRoutingFees.write(value.fees, into: &buf) + } +} + + +public func FfiConverterTypeRouteHintHop_lift(_ buf: RustBuffer) throws -> RouteHintHop { + return try FfiConverterTypeRouteHintHop.lift(buf) +} + +public func FfiConverterTypeRouteHintHop_lower(_ value: RouteHintHop) -> RustBuffer { + return FfiConverterTypeRouteHintHop.lower(value) +} + + public struct RoutingFees { public var baseMsat: UInt32 public var proportionalMillionths: UInt32 @@ -5506,6 +5811,82 @@ extension ConfirmationStatus: Equatable, Hashable {} +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + +public enum Currency { + + case bitcoin + case bitcoinTestnet + case regtest + case simnet + case signet +} + + +public struct FfiConverterTypeCurrency: FfiConverterRustBuffer { + typealias SwiftType = Currency + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Currency { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .bitcoin + + case 2: return .bitcoinTestnet + + case 3: return .regtest + + case 4: return .simnet + + case 5: return .signet + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: Currency, into buf: inout [UInt8]) { + switch value { + + + case .bitcoin: + writeInt(&buf, Int32(1)) + + + case .bitcoinTestnet: + writeInt(&buf, Int32(2)) + + + case .regtest: + writeInt(&buf, Int32(3)) + + + case .simnet: + writeInt(&buf, Int32(4)) + + + case .signet: + writeInt(&buf, Int32(5)) + + } + } +} + + +public func FfiConverterTypeCurrency_lift(_ buf: RustBuffer) throws -> Currency { + return try FfiConverterTypeCurrency.lift(buf) +} + +public func FfiConverterTypeCurrency_lower(_ value: Currency) -> RustBuffer { + return 
FfiConverterTypeCurrency.lower(value) +} + + + +extension Currency: Equatable, Hashable {} + + + // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. @@ -8072,6 +8453,28 @@ fileprivate struct FfiConverterSequenceTypePeerDetails: FfiConverterRustBuffer { } } +fileprivate struct FfiConverterSequenceTypeRouteHintHop: FfiConverterRustBuffer { + typealias SwiftType = [RouteHintHop] + + public static func write(_ value: [RouteHintHop], into buf: inout [UInt8]) { + let len = Int32(value.count) + writeInt(&buf, len) + for item in value { + FfiConverterTypeRouteHintHop.write(item, into: &buf) + } + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [RouteHintHop] { + let len: Int32 = try readInt(&buf) + var seq = [RouteHintHop]() + seq.reserveCapacity(Int(len)) + for _ in 0 ..< len { + seq.append(try FfiConverterTypeRouteHintHop.read(from: &buf)) + } + return seq + } +} + fileprivate struct FfiConverterSequenceTypeLightningBalance: FfiConverterRustBuffer { typealias SwiftType = [LightningBalance] @@ -8116,6 +8519,50 @@ fileprivate struct FfiConverterSequenceTypePendingSweepBalance: FfiConverterRust } } +fileprivate struct FfiConverterSequenceSequenceTypeRouteHintHop: FfiConverterRustBuffer { + typealias SwiftType = [[RouteHintHop]] + + public static func write(_ value: [[RouteHintHop]], into buf: inout [UInt8]) { + let len = Int32(value.count) + writeInt(&buf, len) + for item in value { + FfiConverterSequenceTypeRouteHintHop.write(item, into: &buf) + } + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [[RouteHintHop]] { + let len: Int32 = try readInt(&buf) + var seq = [[RouteHintHop]]() + seq.reserveCapacity(Int(len)) + for _ in 0 ..< len { + seq.append(try FfiConverterSequenceTypeRouteHintHop.read(from: &buf)) + } + return seq + } +} + +fileprivate struct FfiConverterSequenceTypeAddress: FfiConverterRustBuffer { 
+ typealias SwiftType = [Address] + + public static func write(_ value: [Address], into buf: inout [UInt8]) { + let len = Int32(value.count) + writeInt(&buf, len) + for item in value { + FfiConverterTypeAddress.write(item, into: &buf) + } + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [Address] { + let len: Int32 = try readInt(&buf) + var seq = [Address]() + seq.reserveCapacity(Int(len)) + for _ in 0 ..< len { + seq.append(try FfiConverterTypeAddress.read(from: &buf)) + } + return seq + } +} + fileprivate struct FfiConverterSequenceTypeNodeId: FfiConverterRustBuffer { typealias SwiftType = [NodeId] @@ -8274,40 +8721,6 @@ public func FfiConverterTypeBlockHash_lower(_ value: BlockHash) -> RustBuffer { -/** - * Typealias from the type name used in the UDL file to the builtin type. This - * is needed because the UDL type name is used in function/method signatures. - */ -public typealias Bolt11Invoice = String -public struct FfiConverterTypeBolt11Invoice: FfiConverter { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> Bolt11Invoice { - return try FfiConverterString.read(from: &buf) - } - - public static func write(_ value: Bolt11Invoice, into buf: inout [UInt8]) { - return FfiConverterString.write(value, into: &buf) - } - - public static func lift(_ value: RustBuffer) throws -> Bolt11Invoice { - return try FfiConverterString.lift(value) - } - - public static func lower(_ value: Bolt11Invoice) -> RustBuffer { - return FfiConverterString.lower(value) - } -} - - -public func FfiConverterTypeBolt11Invoice_lift(_ value: RustBuffer) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift(value) -} - -public func FfiConverterTypeBolt11Invoice_lower(_ value: Bolt11Invoice) -> RustBuffer { - return FfiConverterTypeBolt11Invoice.lower(value) -} - - - /** * Typealias from the type name used in the UDL file to the builtin type. 
This * is needed because the UDL type name is used in function/method signatures. @@ -9032,40 +9445,88 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_func_generate_entropy_mnemonic() != 59926) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_bolt11invoice_amount_milli_satoshis() != 50823) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_currency() != 32179) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_description() != 9887) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_expiry_time_seconds() != 23625) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_fallback_addresses() != 55276) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_is_expired() != 15932) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_min_final_cltv_expiry_delta() != 8855) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_network() != 10420) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_payment_hash() != 42571) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_payment_secret() != 26081) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_recover_payee_pub_key() != 18874) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_route_hints() != 63051) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_seconds_since_epoch() != 53979) 
{ + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_seconds_until_expiry() != 64193) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_signable_hash() != 30910) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_bolt11invoice_would_expire() != 30331) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_bolt11payment_claim_for_hash() != 52848) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt11payment_fail_for_hash() != 24516) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_receive() != 47624) { + if (uniffi_ldk_node_checksum_method_bolt11payment_receive() != 6073) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_receive_for_hash() != 36395) { + if (uniffi_ldk_node_checksum_method_bolt11payment_receive_for_hash() != 27050) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount() != 38916) { + if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount() != 4893) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount_for_hash() != 9075) { + if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount_for_hash() != 1402) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount_via_jit_channel() != 58805) { + if (uniffi_ldk_node_checksum_method_bolt11payment_receive_variable_amount_via_jit_channel() != 24506) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_receive_via_jit_channel() != 30211) { + if 
(uniffi_ldk_node_checksum_method_bolt11payment_receive_via_jit_channel() != 16532) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 39133) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 63952) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes() != 39625) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes() != 969) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes_using_amount() != 25010) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes_using_amount() != 50136) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 19557) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 36530) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt12payment_initiate_refund() != 38039) { @@ -9320,6 +9781,9 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_vssheaderprovider_get_headers() != 7788) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_constructor_bolt11invoice_from_str() != 349) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_constructor_builder_from_config() != 994) { return InitializationResult.apiChecksumMismatch } From 07b5471d10b71e5693fac9dcb9a183da1aaff46e Mon Sep 17 00:00:00 2001 From: alexanderwiederin Date: Sun, 1 Jun 2025 06:43:02 +0200 Subject: [PATCH 005/184] Restructure FFI bindings with conversion traits for Lightning types This commit reorganizes the FFI architecture by introducing conversion traits for lightning types. Moves code from uniffi_types.rs to a dedicated ffi module for separation of concerns. 
--- src/ffi/mod.rs | 47 ++++++++++++++++ src/{uniffi_types.rs => ffi/types.rs} | 19 ++++--- src/lib.rs | 5 +- src/liquidity.rs | 2 +- src/payment/bolt11.rs | 80 +++++++++------------------ src/payment/unified_qr.rs | 5 +- 6 files changed, 89 insertions(+), 69 deletions(-) create mode 100644 src/ffi/mod.rs rename src/{uniffi_types.rs => ffi/types.rs} (98%) diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs new file mode 100644 index 000000000..32464d044 --- /dev/null +++ b/src/ffi/mod.rs @@ -0,0 +1,47 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in + +#[cfg(feature = "uniffi")] +mod types; + +#[cfg(feature = "uniffi")] +pub use types::*; + +#[cfg(feature = "uniffi")] +pub fn maybe_deref(wrapped_type: &std::sync::Arc) -> &R +where + T: AsRef, +{ + wrapped_type.as_ref().as_ref() +} + +#[cfg(feature = "uniffi")] +pub fn maybe_try_convert_enum(wrapped_type: &T) -> Result +where + for<'a> R: TryFrom<&'a T, Error = crate::error::Error>, +{ + R::try_from(wrapped_type) +} + +#[cfg(feature = "uniffi")] +pub fn maybe_wrap(ldk_type: impl Into) -> std::sync::Arc { + std::sync::Arc::new(ldk_type.into()) +} + +#[cfg(not(feature = "uniffi"))] +pub fn maybe_deref(value: &T) -> &T { + value +} + +#[cfg(not(feature = "uniffi"))] +pub fn maybe_try_convert_enum(value: &T) -> Result<&T, crate::error::Error> { + Ok(value) +} + +#[cfg(not(feature = "uniffi"))] +pub fn maybe_wrap(value: T) -> T { + value +} diff --git a/src/uniffi_types.rs b/src/ffi/types.rs similarity index 98% rename from src/uniffi_types.rs rename to src/ffi/types.rs index 77f9348cc..4d3093476 100644 --- a/src/uniffi_types.rs +++ b/src/ffi/types.rs @@ -61,6 +61,7 @@ use lightning::util::ser::Writeable; use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; use std::convert::TryInto; +use std::ops::Deref; use 
std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -475,11 +476,6 @@ impl Bolt11Invoice { invoice_str.parse() } - /// Returns the underlying invoice [`LdkBolt11Invoice`] - pub fn into_inner(self) -> LdkBolt11Invoice { - self.inner - } - /// The hash of the [`RawBolt11Invoice`] that was signed. /// /// [`RawBolt11Invoice`]: lightning_invoice::RawBolt11Invoice @@ -593,9 +589,16 @@ impl From for Bolt11Invoice { } } -impl From for LdkBolt11Invoice { - fn from(wrapper: Bolt11Invoice) -> Self { - wrapper.into_inner() +impl Deref for Bolt11Invoice { + type Target = LdkBolt11Invoice; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef for Bolt11Invoice { + fn as_ref(&self) -> &LdkBolt11Invoice { + self.deref() } } diff --git a/src/lib.rs b/src/lib.rs index c3bfe16d8..e80ca964d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -84,6 +84,7 @@ mod data_store; mod error; mod event; mod fee_estimator; +mod ffi; mod gossip; pub mod graph; mod hex_utils; @@ -96,8 +97,6 @@ mod peer_store; mod sweep; mod tx_broadcaster; mod types; -#[cfg(feature = "uniffi")] -mod uniffi_types; mod wallet; pub use bip39; @@ -117,7 +116,7 @@ pub use event::Event; pub use io::utils::generate_entropy_mnemonic; #[cfg(feature = "uniffi")] -use uniffi_types::*; +use ffi::*; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; diff --git a/src/liquidity.rs b/src/liquidity.rs index 47f3dcce4..a4516edd0 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -1308,7 +1308,7 @@ type PaymentInfo = lightning_liquidity::lsps1::msgs::PaymentInfo; #[derive(Clone, Debug, PartialEq, Eq)] pub struct PaymentInfo { /// A Lightning payment using BOLT 11. - pub bolt11: Option, + pub bolt11: Option, /// An onchain payment. 
pub onchain: Option, } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 052571818..817a428bd 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -13,6 +13,7 @@ use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::connection::ConnectionManager; use crate::data_store::DataStoreUpdateResult; use crate::error::Error; +use crate::ffi::{maybe_deref, maybe_try_convert_enum, maybe_wrap}; use crate::liquidity::LiquiditySource; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{ @@ -42,43 +43,12 @@ use std::sync::{Arc, RwLock}; #[cfg(not(feature = "uniffi"))] type Bolt11Invoice = LdkBolt11Invoice; #[cfg(feature = "uniffi")] -type Bolt11Invoice = Arc; - -#[cfg(not(feature = "uniffi"))] -pub(crate) fn maybe_wrap_invoice(invoice: LdkBolt11Invoice) -> Bolt11Invoice { - invoice -} -#[cfg(feature = "uniffi")] -pub(crate) fn maybe_wrap_invoice(invoice: LdkBolt11Invoice) -> Bolt11Invoice { - Arc::new(invoice.into()) -} - -#[cfg(not(feature = "uniffi"))] -pub fn maybe_convert_invoice(invoice: &Bolt11Invoice) -> &LdkBolt11Invoice { - invoice -} -#[cfg(feature = "uniffi")] -pub fn maybe_convert_invoice(invoice: &Bolt11Invoice) -> &LdkBolt11Invoice { - &invoice.inner -} +type Bolt11Invoice = Arc; #[cfg(not(feature = "uniffi"))] type Bolt11InvoiceDescription = LdkBolt11InvoiceDescription; #[cfg(feature = "uniffi")] -type Bolt11InvoiceDescription = crate::uniffi_types::Bolt11InvoiceDescription; - -macro_rules! maybe_convert_description { - ($description: expr) => {{ - #[cfg(not(feature = "uniffi"))] - { - $description - } - #[cfg(feature = "uniffi")] - { - &LdkBolt11InvoiceDescription::try_from($description)? - } - }}; -} +type Bolt11InvoiceDescription = crate::ffi::Bolt11InvoiceDescription; /// A payment handler allowing to create and pay [BOLT 11] invoices. 
/// @@ -125,7 +95,7 @@ impl Bolt11Payment { pub fn send( &self, invoice: &Bolt11Invoice, sending_parameters: Option, ) -> Result { - let invoice = maybe_convert_invoice(invoice); + let invoice = maybe_deref(invoice); let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); @@ -234,7 +204,7 @@ impl Bolt11Payment { &self, invoice: &Bolt11Invoice, amount_msat: u64, sending_parameters: Option, ) -> Result { - let invoice = maybe_convert_invoice(invoice); + let invoice = maybe_deref(invoice); let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); @@ -466,9 +436,9 @@ impl Bolt11Payment { pub fn receive( &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, ) -> Result { - let description = maybe_convert_description!(description); - let invoice = self.receive_inner(Some(amount_msat), description, expiry_secs, None)?; - Ok(maybe_wrap_invoice(invoice)) + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_inner(Some(amount_msat), &description, expiry_secs, None)?; + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request a payment of the amount @@ -489,10 +459,10 @@ impl Bolt11Payment { &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, payment_hash: PaymentHash, ) -> Result { - let description = maybe_convert_description!(description); + let description = maybe_try_convert_enum(description)?; let invoice = - self.receive_inner(Some(amount_msat), description, expiry_secs, Some(payment_hash))?; - Ok(maybe_wrap_invoice(invoice)) + self.receive_inner(Some(amount_msat), &description, expiry_secs, Some(payment_hash))?; + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request and receive a payment for which the @@ -502,9 +472,9 @@ impl Bolt11Payment { pub fn receive_variable_amount( &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, ) 
-> Result { - let description = maybe_convert_description!(description); - let invoice = self.receive_inner(None, description, expiry_secs, None)?; - Ok(maybe_wrap_invoice(invoice)) + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_inner(None, &description, expiry_secs, None)?; + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request a payment for the given payment hash @@ -524,9 +494,9 @@ impl Bolt11Payment { pub fn receive_variable_amount_for_hash( &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, payment_hash: PaymentHash, ) -> Result { - let description = maybe_convert_description!(description); - let invoice = self.receive_inner(None, description, expiry_secs, Some(payment_hash))?; - Ok(maybe_wrap_invoice(invoice)) + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_inner(None, &description, expiry_secs, Some(payment_hash))?; + Ok(maybe_wrap(invoice)) } pub(crate) fn receive_inner( @@ -601,15 +571,15 @@ impl Bolt11Payment { &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, max_total_lsp_fee_limit_msat: Option, ) -> Result { - let description = maybe_convert_description!(description); + let description = maybe_try_convert_enum(description)?; let invoice = self.receive_via_jit_channel_inner( Some(amount_msat), - description, + &description, expiry_secs, max_total_lsp_fee_limit_msat, None, )?; - Ok(maybe_wrap_invoice(invoice)) + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request a variable amount payment (also known @@ -627,15 +597,15 @@ impl Bolt11Payment { &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, max_proportional_lsp_fee_limit_ppm_msat: Option, ) -> Result { - let description = maybe_convert_description!(description); + let description = maybe_try_convert_enum(description)?; let invoice = self.receive_via_jit_channel_inner( None, - description, + 
&description, expiry_secs, None, max_proportional_lsp_fee_limit_ppm_msat, )?; - Ok(maybe_wrap_invoice(invoice)) + Ok(maybe_wrap(invoice)) } fn receive_via_jit_channel_inner( @@ -742,7 +712,7 @@ impl Bolt11Payment { /// amount times [`Config::probing_liquidity_limit_multiplier`] won't be used to send /// pre-flight probes. pub fn send_probes(&self, invoice: &Bolt11Invoice) -> Result<(), Error> { - let invoice = maybe_convert_invoice(invoice); + let invoice = maybe_deref(invoice); let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); @@ -775,7 +745,7 @@ impl Bolt11Payment { pub fn send_probes_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, ) -> Result<(), Error> { - let invoice = maybe_convert_invoice(invoice); + let invoice = maybe_deref(invoice); let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index 5e6c1ef60..abfc5b784 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -12,8 +12,9 @@ //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md //! [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md use crate::error::Error; +use crate::ffi::maybe_wrap; use crate::logger::{log_error, LdkLogger, Logger}; -use crate::payment::{bolt11::maybe_wrap_invoice, Bolt11Payment, Bolt12Payment, OnchainPayment}; +use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; use crate::Config; use lightning::ln::channelmanager::PaymentId; @@ -153,7 +154,7 @@ impl UnifiedQrPayment { } if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { - let invoice = maybe_wrap_invoice(invoice); + let invoice = maybe_wrap(invoice); match self.bolt11_invoice.send(&invoice, None) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. 
This is part of a unified QR code payment. Falling back to the on-chain transaction.", e), From ef81e0d65947aa797f5c91e92036e37c735b6737 Mon Sep 17 00:00:00 2001 From: alexanderwiederin Date: Mon, 5 May 2025 19:49:02 +0200 Subject: [PATCH 006/184] Add Offer wrapper for FFI bindings Implement Offer struct in ffi/types.rs to provide a wrapper around LDK's Offer for cross-language bindings. Modified payment handling in bolt12.rs to: - Support both native and FFI-compatible types via type aliasing - Implement conditional compilation for transparent FFI support - Update payment functions to handle wrapped types Added testing to verify that properties are preserved when wrapping/unwrapping between native and FFI types. --- bindings/ldk_node.udl | 27 +++- src/ffi/types.rs | 310 +++++++++++++++++++++++++++++++++++++- src/payment/bolt12.rs | 27 +++- src/payment/unified_qr.rs | 17 ++- 4 files changed, 357 insertions(+), 24 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index c2f0166c8..38ab4677c 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -736,6 +736,30 @@ interface Bolt11Invoice { PublicKey recover_payee_pub_key(); }; +[Enum] +interface OfferAmount { + Bitcoin(u64 amount_msats); + Currency(string iso4217_code, u64 amount); +}; + +[Traits=(Debug, Display, Eq)] +interface Offer { + [Throws=NodeError, Name=from_str] + constructor([ByRef] string offer_str); + OfferId id(); + boolean is_expired(); + string? description(); + string? issuer(); + OfferAmount? amount(); + boolean is_valid_quantity(u64 quantity); + boolean expects_quantity(); + boolean supports_chain(Network chain); + sequence chains(); + sequence? metadata(); + u64? absolute_expiry_seconds(); + PublicKey? 
issuer_signing_pubkey(); +}; + [Custom] typedef string Txid; @@ -754,9 +778,6 @@ typedef string NodeId; [Custom] typedef string Address; -[Custom] -typedef string Offer; - [Custom] typedef string Refund; diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 4d3093476..8b511c830 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -26,7 +26,7 @@ pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; pub use lightning::ln::types::ChannelId; pub use lightning::offers::invoice::Bolt12Invoice; -pub use lightning::offers::offer::{Offer, OfferId}; +pub use lightning::offers::offer::OfferId; pub use lightning::offers::refund::Refund; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::util::string::UntrustedString; @@ -57,6 +57,7 @@ use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; use lightning::ln::channelmanager::PaymentId; +use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; use lightning::util::ser::Writeable; use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; @@ -114,15 +115,166 @@ impl UniffiCustomTypeConverter for Address { } } -impl UniffiCustomTypeConverter for Offer { - type Builtin = String; +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OfferAmount { + Bitcoin { amount_msats: u64 }, + Currency { iso4217_code: String, amount: u64 }, +} - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Offer::from_str(&val).map_err(|_| Error::InvalidOffer.into()) +impl From for OfferAmount { + fn from(ldk_amount: LdkAmount) -> Self { + match ldk_amount { + LdkAmount::Bitcoin { amount_msats } => OfferAmount::Bitcoin { amount_msats }, + LdkAmount::Currency { iso4217_code, amount } => OfferAmount::Currency { + iso4217_code: iso4217_code.iter().map(|&b| b as char).collect(), + amount, + }, + } } +} - fn from_custom(obj: Self) -> Self::Builtin { - 
obj.to_string() +/// An `Offer` is a potentially long-lived proposal for payment of a good or service. +/// +/// An offer is a precursor to an [`InvoiceRequest`]. A merchant publishes an offer from which a +/// customer may request an [`Bolt12Invoice`] for a specific quantity and using an amount sufficient +/// to cover that quantity (i.e., at least `quantity * amount`). See [`Offer::amount`]. +/// +/// Offers may be denominated in currency other than bitcoin but are ultimately paid using the +/// latter. +/// +/// Through the use of [`BlindedMessagePath`]s, offers provide recipient privacy. +/// +/// [`InvoiceRequest`]: lightning::offers::invoice_request::InvoiceRequest +/// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice +/// [`Offer`]: lightning::offers::Offer:amount +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Offer { + pub(crate) inner: LdkOffer, +} + +impl Offer { + pub fn from_str(offer_str: &str) -> Result { + offer_str.parse() + } + + /// Returns the id of the offer. + pub fn id(&self) -> OfferId { + OfferId(self.inner.id().0) + } + + /// Whether the offer has expired. + pub fn is_expired(&self) -> bool { + self.inner.is_expired() + } + + /// A complete description of the purpose of the payment. + /// + /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. + pub fn description(&self) -> Option { + self.inner.description().map(|printable| printable.to_string()) + } + + /// The issuer of the offer, possibly beginning with `user@domain` or `domain`. + /// + /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. + pub fn issuer(&self) -> Option { + self.inner.issuer().map(|printable| printable.to_string()) + } + + /// The minimum amount required for a successful payment of a single item. + pub fn amount(&self) -> Option { + self.inner.amount().map(|amount| amount.into()) + } + + /// Returns whether the given quantity is valid for the offer. 
+ pub fn is_valid_quantity(&self, quantity: u64) -> bool { + self.inner.is_valid_quantity(quantity) + } + + /// Returns whether a quantity is expected in an [`InvoiceRequest`] for the offer. + /// + /// [`InvoiceRequest`]: lightning::offers::invoice_request::InvoiceRequest + pub fn expects_quantity(&self) -> bool { + self.inner.expects_quantity() + } + + /// Returns whether the given chain is supported by the offer. + pub fn supports_chain(&self, chain: Network) -> bool { + self.inner.supports_chain(chain.chain_hash()) + } + + /// The chains that may be used when paying a requested invoice (e.g., bitcoin mainnet). + /// + /// Payments must be denominated in units of the minimal lightning-payable unit (e.g., msats) + /// for the selected chain. + pub fn chains(&self) -> Vec { + self.inner.chains().into_iter().filter_map(Network::from_chain_hash).collect() + } + + /// Opaque bytes set by the originator. + /// + /// Useful for authentication and validating fields since it is reflected in `invoice_request` + /// messages along with all the other fields from the `offer`. + pub fn metadata(&self) -> Option> { + self.inner.metadata().cloned() + } + + /// Seconds since the Unix epoch when an invoice should no longer be requested. + /// + /// If `None`, the offer does not expire. + pub fn absolute_expiry_seconds(&self) -> Option { + self.inner.absolute_expiry().map(|duration| duration.as_secs()) + } + + /// The public key corresponding to the key used by the recipient to sign invoices. + /// - If [`Offer::paths`] is empty, MUST be `Some` and contain the recipient's node id for + /// sending an [`InvoiceRequest`]. + /// - If [`Offer::paths`] is not empty, MAY be `Some` and contain a transient id. + /// - If `None`, the signing pubkey will be the final blinded node id from the + /// [`BlindedMessagePath`] in [`Offer::paths`] used to send the [`InvoiceRequest`]. + /// + /// See also [`Bolt12Invoice::signing_pubkey`]. 
+ /// + /// [`InvoiceRequest`]: lightning::offers::invoice_request::InvoiceRequest + /// [`Bolt12Invoice::signing_pubkey`]: lightning::offers::invoice::Bolt12Invoice::signing_pubkey + pub fn issuer_signing_pubkey(&self) -> Option { + self.inner.issuer_signing_pubkey() + } +} + +impl std::str::FromStr for Offer { + type Err = Error; + + fn from_str(offer_str: &str) -> Result { + offer_str + .parse::() + .map(|offer| Offer { inner: offer }) + .map_err(|_| Error::InvalidOffer) + } +} + +impl From for Offer { + fn from(offer: LdkOffer) -> Self { + Offer { inner: offer } + } +} + +impl Deref for Offer { + type Target = LdkOffer; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef for Offer { + fn as_ref(&self) -> &LdkOffer { + self.deref() + } +} + +impl std::fmt::Display for Offer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.inner) } } @@ -661,6 +813,13 @@ impl UniffiCustomTypeConverter for DateTime { #[cfg(test)] mod tests { + use std::{ + num::NonZeroU64, + time::{SystemTime, UNIX_EPOCH}, + }; + + use lightning::offers::offer::{OfferBuilder, Quantity}; + use super::*; fn create_test_invoice() -> (LdkBolt11Invoice, Bolt11Invoice) { @@ -670,6 +829,36 @@ mod tests { (ldk_invoice, wrapped_invoice) } + fn create_test_offer() -> (LdkOffer, Offer) { + let pubkey = bitcoin::secp256k1::PublicKey::from_str( + "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619", + ) + .unwrap(); + + let expiry = + (SystemTime::now() + Duration::from_secs(3600)).duration_since(UNIX_EPOCH).unwrap(); + + let quantity = NonZeroU64::new(10_000).unwrap(); + + let builder = OfferBuilder::new(pubkey) + .description("Test offer description".to_string()) + .amount_msats(100_000) + .issuer("Offer issuer".to_string()) + .absolute_expiry(expiry) + .chain(Network::Bitcoin) + .supported_quantity(Quantity::Bounded(quantity)) + .metadata(vec![ + 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0xba, 0xbe, 0x12, 0x34, 0x56, 0x78, 0x90, 
0xab, + 0xcd, 0xef, + ]) + .unwrap(); + + let ldk_offer = builder.build().unwrap(); + let wrapped_offer = Offer::from(ldk_offer.clone()); + + (ldk_offer, wrapped_offer) + } + #[test] fn test_invoice_description_conversion() { let hash = "09d08d4865e8af9266f6cc7c0ae23a1d6bf868207cf8f7c5979b9f6ed850dfb0".to_string(); @@ -779,4 +968,111 @@ mod tests { parsed_invoice.payment_hash().to_byte_array().to_vec() ); } + + #[test] + fn test_offer() { + let (ldk_offer, wrapped_offer) = create_test_offer(); + match (ldk_offer.description(), wrapped_offer.description()) { + (Some(ldk_desc), Some(wrapped_desc)) => { + assert_eq!(ldk_desc.to_string(), wrapped_desc); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had a description but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had a description but LDK offer did not!"); + }, + } + + match (ldk_offer.amount(), wrapped_offer.amount()) { + (Some(ldk_amount), Some(wrapped_amount)) => { + let ldk_amount: OfferAmount = ldk_amount.into(); + assert_eq!(ldk_amount, wrapped_amount); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had an amount but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had an amount but LDK offer did not!"); + }, + } + + match (ldk_offer.issuer(), wrapped_offer.issuer()) { + (Some(ldk_issuer), Some(wrapped_issuer)) => { + assert_eq!(ldk_issuer.to_string(), wrapped_issuer); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had an issuer but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had an issuer but LDK offer did not!"); + }, + } + + assert_eq!(ldk_offer.is_expired(), wrapped_offer.is_expired()); + assert_eq!(ldk_offer.id(), 
wrapped_offer.id()); + assert_eq!(ldk_offer.is_valid_quantity(10_000), wrapped_offer.is_valid_quantity(10_000)); + assert_eq!(ldk_offer.expects_quantity(), wrapped_offer.expects_quantity()); + assert_eq!( + ldk_offer.supports_chain(Network::Bitcoin.chain_hash()), + wrapped_offer.supports_chain(Network::Bitcoin) + ); + assert_eq!( + ldk_offer.chains(), + wrapped_offer.chains().iter().map(|c| c.chain_hash()).collect::>() + ); + match (ldk_offer.metadata(), wrapped_offer.metadata()) { + (Some(ldk_metadata), Some(wrapped_metadata)) => { + assert_eq!(ldk_metadata.clone(), wrapped_metadata); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had metadata but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had metadata but LDK offer did not!"); + }, + } + + match (ldk_offer.absolute_expiry(), wrapped_offer.absolute_expiry_seconds()) { + (Some(ldk_expiry), Some(wrapped_expiry)) => { + assert_eq!(ldk_expiry.as_secs(), wrapped_expiry); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had an absolute expiry but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had an absolute expiry but LDK offer did not!"); + }, + } + + match (ldk_offer.issuer_signing_pubkey(), wrapped_offer.issuer_signing_pubkey()) { + (Some(ldk_expiry_signing_pubkey), Some(wrapped_issuer_signing_pubkey)) => { + assert_eq!(ldk_expiry_signing_pubkey, wrapped_issuer_signing_pubkey); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had an issuer signing pubkey but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had an issuer signing pubkey but LDK offer did not!"); + }, + } + } } diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs 
index 8006f4bb9..aa642a084 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -11,13 +11,14 @@ use crate::config::LDK_PAYMENT_RETRY_TIMEOUT; use crate::error::Error; +use crate::ffi::{maybe_deref, maybe_wrap}; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; use crate::types::{ChannelManager, PaymentStore}; use lightning::ln::channelmanager::{PaymentId, Retry}; use lightning::offers::invoice::Bolt12Invoice; -use lightning::offers::offer::{Amount, Offer, Quantity}; +use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::offers::refund::Refund; use lightning::util::string::UntrustedString; @@ -28,6 +29,11 @@ use std::num::NonZeroU64; use std::sync::{Arc, RwLock}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +#[cfg(not(feature = "uniffi"))] +type Offer = LdkOffer; +#[cfg(feature = "uniffi")] +type Offer = Arc; + /// A payment handler allowing to create and pay [BOLT 12] offers and refunds. /// /// Should be retrieved by calling [`Node::bolt12_payment`]. @@ -59,6 +65,7 @@ impl Bolt12Payment { pub fn send( &self, offer: &Offer, quantity: Option, payer_note: Option, ) -> Result { + let offer = maybe_deref(offer); let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); @@ -160,6 +167,7 @@ impl Bolt12Payment { pub fn send_using_amount( &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, ) -> Result { + let offer = maybe_deref(offer); let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); @@ -254,11 +262,9 @@ impl Bolt12Payment { } } - /// Returns a payable offer that can be used to request and receive a payment of the amount - /// given. 
- pub fn receive( + pub(crate) fn receive_inner( &self, amount_msat: u64, description: &str, expiry_secs: Option, quantity: Option, - ) -> Result { + ) -> Result { let absolute_expiry = expiry_secs.map(|secs| { (SystemTime::now() + Duration::from_secs(secs as u64)) .duration_since(UNIX_EPOCH) @@ -291,6 +297,15 @@ impl Bolt12Payment { Ok(finalized_offer) } + /// Returns a payable offer that can be used to request and receive a payment of the amount + /// given. + pub fn receive( + &self, amount_msat: u64, description: &str, expiry_secs: Option, quantity: Option, + ) -> Result { + let offer = self.receive_inner(amount_msat, description, expiry_secs, quantity)?; + Ok(maybe_wrap(offer)) + } + /// Returns a payable offer that can be used to request and receive a payment for which the /// amount is to be determined by the user, also known as a "zero-amount" offer. pub fn receive_variable_amount( @@ -312,7 +327,7 @@ impl Bolt12Payment { Error::OfferCreationFailed })?; - Ok(offer) + Ok(maybe_wrap(offer)) } /// Requests a refund payment for the given [`Refund`]. 
diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index abfc5b784..125e1d09b 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -95,14 +95,14 @@ impl UnifiedQrPayment { let amount_msats = amount_sats * 1_000; - let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description, None, None) - { - Ok(offer) => Some(offer), - Err(e) => { - log_error!(self.logger, "Failed to create offer: {}", e); - None - }, - }; + let bolt12_offer = + match self.bolt12_payment.receive_inner(amount_msats, description, None, None) { + Ok(offer) => Some(offer), + Err(e) => { + log_error!(self.logger, "Failed to create offer: {}", e); + None + }, + }; let invoice_description = Bolt11InvoiceDescription::Direct( Description::new(description.to_string()).map_err(|_| Error::InvoiceCreationFailed)?, @@ -147,6 +147,7 @@ impl UnifiedQrPayment { uri.clone().require_network(self.config.network).map_err(|_| Error::InvalidNetwork)?; if let Some(offer) = uri_network_checked.extras.bolt12_offer { + let offer = maybe_wrap(offer); match self.bolt12_payment.send(&offer, None, None) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), From dc6a8f20548a21d38d1906c2a90100ae82e78861 Mon Sep 17 00:00:00 2001 From: alexanderwiederin Date: Mon, 5 May 2025 19:50:24 +0200 Subject: [PATCH 007/184] Add Refund wrapper for FFI bindings Implement Refund struct in ffi/types.rs to provide a wrapper around LDK's Refund for cross-language bindings. Modified payment handling in bolt12.rs to: - Support both native and FFI-compatible types via type aliasing - Implement conditional compilation for transparent FFI support - Update payment functions to handle wrapped types Added testing to verify that properties are preserved when wrapping/unwrapping between native and FFI types. 
--- bindings/ldk_node.udl | 19 +++- src/ffi/types.rs | 227 ++++++++++++++++++++++++++++++++++++++++-- src/payment/bolt12.rs | 15 ++- 3 files changed, 246 insertions(+), 15 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 38ab4677c..d48993532 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -760,6 +760,22 @@ interface Offer { PublicKey? issuer_signing_pubkey(); }; +[Traits=(Debug, Display, Eq)] +interface Refund { + [Throws=NodeError, Name=from_str] + constructor([ByRef] string refund_str); + string description(); + u64? absolute_expiry_seconds(); + boolean is_expired(); + string? issuer(); + sequence payer_metadata(); + Network? chain(); + u64 amount_msats(); + u64? quantity(); + PublicKey payer_signing_pubkey(); + string? payer_note(); +}; + [Custom] typedef string Txid; @@ -778,9 +794,6 @@ typedef string NodeId; [Custom] typedef string Address; -[Custom] -typedef string Refund; - [Custom] typedef string Bolt12Invoice; diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 8b511c830..6c0aaf880 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -27,7 +27,6 @@ pub use lightning::events::{ClosureReason, PaymentFailureReason}; pub use lightning::ln::types::ChannelId; pub use lightning::offers::invoice::Bolt12Invoice; pub use lightning::offers::offer::OfferId; -pub use lightning::offers::refund::Refund; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::util::string::UntrustedString; @@ -58,6 +57,7 @@ use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; use lightning::ln::channelmanager::PaymentId; use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; +use lightning::offers::refund::Refund as LdkRefund; use lightning::util::ser::Writeable; use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; @@ -278,15 +278,123 @@ impl std::fmt::Display for Offer { } } -impl UniffiCustomTypeConverter for Refund { - type Builtin = 
String; +/// A `Refund` is a request to send an [`Bolt12Invoice`] without a preceding [`Offer`]. +/// +/// Typically, after an invoice is paid, the recipient may publish a refund allowing the sender to +/// recoup their funds. A refund may be used more generally as an "offer for money", such as with a +/// bitcoin ATM. +/// +/// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice +/// [`Offer`]: lightning::offers::offer::Offer +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Refund { + pub(crate) inner: LdkRefund, +} - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Refund::from_str(&val).map_err(|_| Error::InvalidRefund.into()) +impl Refund { + pub fn from_str(refund_str: &str) -> Result { + refund_str.parse() } - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() + /// A complete description of the purpose of the refund. + /// + /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. + pub fn description(&self) -> String { + self.inner.description().to_string() + } + + /// Seconds since the Unix epoch when an invoice should no longer be sent. + /// + /// If `None`, the refund does not expire. + pub fn absolute_expiry_seconds(&self) -> Option { + self.inner.absolute_expiry().map(|duration| duration.as_secs()) + } + + /// Whether the refund has expired. + pub fn is_expired(&self) -> bool { + self.inner.is_expired() + } + + /// The issuer of the refund, possibly beginning with `user@domain` or `domain`. + /// + /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. + pub fn issuer(&self) -> Option { + self.inner.issuer().map(|printable| printable.to_string()) + } + + /// An unpredictable series of bytes, typically containing information about the derivation of + /// [`payer_signing_pubkey`]. 
+ /// + /// [`payer_signing_pubkey`]: Self::payer_signing_pubkey + pub fn payer_metadata(&self) -> Vec { + self.inner.payer_metadata().to_vec() + } + + /// A chain that the refund is valid for. + pub fn chain(&self) -> Option { + Network::try_from(self.inner.chain()).ok() + } + + /// The amount to refund in msats (i.e., the minimum lightning-payable unit for [`chain`]). + /// + /// [`chain`]: Self::chain + pub fn amount_msats(&self) -> u64 { + self.inner.amount_msats() + } + + /// The quantity of an item that refund is for. + pub fn quantity(&self) -> Option { + self.inner.quantity() + } + + /// A public node id to send to in the case where there are no [`paths`]. + /// + /// Otherwise, a possibly transient pubkey. + /// + /// [`paths`]: lightning::offers::refund::Refund::paths + pub fn payer_signing_pubkey(&self) -> PublicKey { + self.inner.payer_signing_pubkey() + } + + /// Payer provided note to include in the invoice. + pub fn payer_note(&self) -> Option { + self.inner.payer_note().map(|printable| printable.to_string()) + } +} + +impl std::str::FromStr for Refund { + type Err = Error; + + fn from_str(refund_str: &str) -> Result { + refund_str + .parse::() + .map(|refund| Refund { inner: refund }) + .map_err(|_| Error::InvalidRefund) + } +} + +impl From for Refund { + fn from(refund: LdkRefund) -> Self { + Refund { inner: refund } + } +} + +impl Deref for Refund { + type Target = LdkRefund; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef for Refund { + fn as_ref(&self) -> &LdkRefund { + self.deref() + } +} + +impl std::fmt::Display for Refund { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.inner) } } @@ -818,9 +926,11 @@ mod tests { time::{SystemTime, UNIX_EPOCH}, }; - use lightning::offers::offer::{OfferBuilder, Quantity}; - use super::*; + use lightning::offers::{ + offer::{OfferBuilder, Quantity}, + refund::RefundBuilder, + }; fn create_test_invoice() -> (LdkBolt11Invoice, Bolt11Invoice) 
{ let invoice_string = "lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; @@ -859,6 +969,28 @@ mod tests { (ldk_offer, wrapped_offer) } + fn create_test_refund() -> (LdkRefund, Refund) { + let payer_key = bitcoin::secp256k1::PublicKey::from_str( + "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619", + ) + .unwrap(); + + let expiry = + (SystemTime::now() + Duration::from_secs(3600)).duration_since(UNIX_EPOCH).unwrap(); + + let builder = RefundBuilder::new("Test refund".to_string().into(), payer_key, 100_000) + .unwrap() + .description("Test refund description".to_string()) + .absolute_expiry(expiry) + .quantity(3) + .issuer("test_issuer".to_string()); + + let ldk_refund = builder.build().unwrap(); + let wrapped_refund = Refund::from(ldk_refund.clone()); + + (ldk_refund, wrapped_refund) + } + #[test] fn test_invoice_description_conversion() { let hash = "09d08d4865e8af9266f6cc7c0ae23a1d6bf868207cf8f7c5979b9f6ed850dfb0".to_string(); @@ -1075,4 +1207,81 @@ mod tests { }, } } + + #[test] + fn test_refund_roundtrip() { + let (ldk_refund, _) = create_test_refund(); + + let refund_str = ldk_refund.to_string(); + + let parsed_refund = Refund::from_str(&refund_str); + assert!(parsed_refund.is_ok(), "Failed to parse refund from string!"); + + let invalid_result = Refund::from_str("invalid_refund_string"); + assert!(invalid_result.is_err()); + assert!(matches!(invalid_result.err().unwrap(), Error::InvalidRefund)); + } + + #[test] + fn test_refund_properties() { + let (ldk_refund, wrapped_refund) = create_test_refund(); + + assert_eq!(ldk_refund.description().to_string(), wrapped_refund.description()); + assert_eq!(ldk_refund.amount_msats(), wrapped_refund.amount_msats()); + 
assert_eq!(ldk_refund.is_expired(), wrapped_refund.is_expired()); + + match (ldk_refund.absolute_expiry(), wrapped_refund.absolute_expiry_seconds()) { + (Some(ldk_expiry), Some(wrapped_expiry)) => { + assert_eq!(ldk_expiry.as_secs(), wrapped_expiry); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK refund had an expiry but wrapped refund did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped refund had an expiry but LDK refund did not!"); + }, + } + + match (ldk_refund.quantity(), wrapped_refund.quantity()) { + (Some(ldk_expiry), Some(wrapped_expiry)) => { + assert_eq!(ldk_expiry, wrapped_expiry); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK refund had an quantity but wrapped refund did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped refund had an quantity but LDK refund did not!"); + }, + } + + match (ldk_refund.issuer(), wrapped_refund.issuer()) { + (Some(ldk_issuer), Some(wrapped_issuer)) => { + assert_eq!(ldk_issuer.to_string(), wrapped_issuer); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK refund had an issuer but wrapped refund did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped refund had an issuer but LDK refund did not!"); + }, + } + + assert_eq!(ldk_refund.payer_metadata().to_vec(), wrapped_refund.payer_metadata()); + assert_eq!(ldk_refund.payer_signing_pubkey(), wrapped_refund.payer_signing_pubkey()); + + if let Ok(network) = Network::try_from(ldk_refund.chain()) { + assert_eq!(wrapped_refund.chain(), Some(network)); + } + + assert_eq!(ldk_refund.payer_note().map(|p| p.to_string()), wrapped_refund.payer_note()); + } } diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index aa642a084..74c3ac45f 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs 
@@ -20,7 +20,6 @@ use lightning::ln::channelmanager::{PaymentId, Retry}; use lightning::offers::invoice::Bolt12Invoice; use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; -use lightning::offers::refund::Refund; use lightning::util::string::UntrustedString; use rand::RngCore; @@ -34,6 +33,11 @@ type Offer = LdkOffer; #[cfg(feature = "uniffi")] type Offer = Arc; +#[cfg(not(feature = "uniffi"))] +type Refund = lightning::offers::refund::Refund; +#[cfg(feature = "uniffi")] +type Refund = Arc; + /// A payment handler allowing to create and pay [BOLT 12] offers and refunds. /// /// Should be retrieved by calling [`Node::bolt12_payment`]. @@ -334,8 +338,11 @@ impl Bolt12Payment { /// /// The returned [`Bolt12Invoice`] is for informational purposes only (i.e., isn't needed to /// retrieve the refund). + /// + /// [`Refund`]: lightning::offers::refund::Refund pub fn request_refund_payment(&self, refund: &Refund) -> Result { - let invoice = self.channel_manager.request_refund_payment(refund).map_err(|e| { + let refund = maybe_deref(refund); + let invoice = self.channel_manager.request_refund_payment(&refund).map_err(|e| { log_error!(self.logger, "Failed to request refund payment: {:?}", e); Error::InvoiceRequestCreationFailed })?; @@ -366,6 +373,8 @@ impl Bolt12Payment { } /// Returns a [`Refund`] object that can be used to offer a refund payment of the amount given. 
+ /// + /// [`Refund`]: lightning::offers::refund::Refund pub fn initiate_refund( &self, amount_msat: u64, expiry_secs: u32, quantity: Option, payer_note: Option, @@ -427,6 +436,6 @@ impl Bolt12Payment { self.payment_store.insert(payment)?; - Ok(refund) + Ok(maybe_wrap(refund)) } } From f01d0219efc1fb7542e1e760f1a10504806edd5e Mon Sep 17 00:00:00 2001 From: alexanderwiederin Date: Mon, 12 May 2025 22:50:49 +0200 Subject: [PATCH 008/184] Add Bolt12Invoice wrapper for FFI bindings Implement Bolt12Invoice struct in ffi/types.rs to provide a wrapper around LDK's Bolt12Invoice for cross-language bindings. Modified payment handling in bolt12.rs to: - Support both native and FFI-compatible types via type aliasing - Implement conditional compilation for transparent FFI support - Update payment functions to handle wrapped types Added testing to verify that properties are preserved when wrapping/unwrapping between native and FFI types. --- bindings/ldk_node.udl | 28 +++- src/ffi/types.rs | 369 ++++++++++++++++++++++++++++++++++++++++-- src/payment/bolt12.rs | 9 +- 3 files changed, 385 insertions(+), 21 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index d48993532..505f0db8d 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -776,6 +776,31 @@ interface Refund { string? payer_note(); }; +interface Bolt12Invoice { + [Throws=NodeError, Name=from_str] + constructor([ByRef] string invoice_str); + PaymentHash payment_hash(); + u64 amount_msats(); + OfferAmount? amount(); + PublicKey signing_pubkey(); + u64 created_at(); + u64? absolute_expiry_seconds(); + u64 relative_expiry(); + boolean is_expired(); + string? description(); + string? issuer(); + string? payer_note(); + sequence? metadata(); + u64? quantity(); + sequence signable_hash(); + PublicKey payer_signing_pubkey(); + PublicKey? issuer_signing_pubkey(); + sequence chain(); + sequence>? offer_chains(); + sequence
fallback_addresses(); + sequence encode(); +}; + [Custom] typedef string Txid; @@ -794,9 +819,6 @@ typedef string NodeId; [Custom] typedef string Address; -[Custom] -typedef string Bolt12Invoice; - [Custom] typedef string OfferId; diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 6c0aaf880..bbf730211 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -25,7 +25,6 @@ pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParame pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; pub use lightning::ln::types::ChannelId; -pub use lightning::offers::invoice::Bolt12Invoice; pub use lightning::offers::offer::OfferId; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::util::string::UntrustedString; @@ -56,6 +55,7 @@ use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; use lightning::ln::channelmanager::PaymentId; +use lightning::offers::invoice::Bolt12Invoice as LdkBolt12Invoice; use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; use lightning::offers::refund::Refund as LdkRefund; use lightning::util::ser::Writeable; @@ -398,20 +398,218 @@ impl std::fmt::Display for Refund { } } -impl UniffiCustomTypeConverter for Bolt12Invoice { - type Builtin = String; +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Bolt12Invoice { + pub(crate) inner: LdkBolt12Invoice, +} - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Some(bytes_vec) = hex_utils::to_vec(&val) { - if let Ok(invoice) = Bolt12Invoice::try_from(bytes_vec) { - return Ok(invoice); +impl Bolt12Invoice { + pub fn from_str(invoice_str: &str) -> Result { + invoice_str.parse() + } + + /// SHA256 hash of the payment preimage that will be given in return for paying the invoice. 
+ pub fn payment_hash(&self) -> PaymentHash { + PaymentHash(self.inner.payment_hash().0) + } + + /// The minimum amount required for a successful payment of the invoice. + pub fn amount_msats(&self) -> u64 { + self.inner.amount_msats() + } + + /// The minimum amount required for a successful payment of a single item. + /// + /// From [`Offer::amount`]; `None` if the invoice was created in response to a [`Refund`] or if + /// the [`Offer`] did not set it. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`Offer::amount`]: lightning::offers::offer::Offer::amount + /// [`Refund`]: lightning::offers::refund::Refund + pub fn amount(&self) -> Option { + self.inner.amount().map(|amount| amount.into()) + } + + /// A typically transient public key corresponding to the key used to sign the invoice. + /// + /// If the invoices was created in response to an [`Offer`], then this will be: + /// - [`Offer::issuer_signing_pubkey`] if it's `Some`, otherwise + /// - the final blinded node id from a [`BlindedMessagePath`] in [`Offer::paths`] if `None`. + /// + /// If the invoice was created in response to a [`Refund`], then it is a valid pubkey chosen by + /// the recipient. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`Offer::issuer_signing_pubkey`]: lightning::offers::offer::Offer::issuer_signing_pubkey + /// [`Offer::paths`]: lightning::offers::offer::Offer::paths + /// [`Refund`]: lightning::offers::refund::Refund + pub fn signing_pubkey(&self) -> PublicKey { + self.inner.signing_pubkey() + } + + /// Duration since the Unix epoch when the invoice was created. + pub fn created_at(&self) -> u64 { + self.inner.created_at().as_secs() + } + + /// Seconds since the Unix epoch when an invoice should no longer be requested. + /// + /// From [`Offer::absolute_expiry`] or [`Refund::absolute_expiry`]. 
+ /// + /// [`Offer::absolute_expiry`]: lightning::offers::offer::Offer::absolute_expiry + pub fn absolute_expiry_seconds(&self) -> Option { + self.inner.absolute_expiry().map(|duration| duration.as_secs()) + } + + /// When the invoice has expired and therefore should no longer be paid. + pub fn relative_expiry(&self) -> u64 { + self.inner.relative_expiry().as_secs() + } + + /// Whether the invoice has expired. + pub fn is_expired(&self) -> bool { + self.inner.is_expired() + } + + /// A complete description of the purpose of the originating offer or refund. + /// + /// From [`Offer::description`] or [`Refund::description`]. + /// + /// [`Offer::description`]: lightning::offers::offer::Offer::description + /// [`Refund::description`]: lightning::offers::refund::Refund::description + pub fn description(&self) -> Option { + self.inner.description().map(|printable| printable.to_string()) + } + + /// The issuer of the offer or refund. + /// + /// From [`Offer::issuer`] or [`Refund::issuer`]. + /// + /// [`Offer::issuer`]: lightning::offers::offer::Offer::issuer + /// [`Refund::issuer`]: lightning::offers::refund::Refund::issuer + pub fn issuer(&self) -> Option { + self.inner.issuer().map(|printable| printable.to_string()) + } + + /// A payer-provided note reflected back in the invoice. + /// + /// From [`InvoiceRequest::payer_note`] or [`Refund::payer_note`]. + /// + /// [`Refund::payer_note`]: lightning::offers::refund::Refund::payer_note + pub fn payer_note(&self) -> Option { + self.inner.payer_note().map(|note| note.to_string()) + } + + /// Opaque bytes set by the originating [`Offer`]. + /// + /// From [`Offer::metadata`]; `None` if the invoice was created in response to a [`Refund`] or + /// if the [`Offer`] did not set it. 
+ /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`Offer::metadata`]: lightning::offers::offer::Offer::metadata + /// [`Refund`]: lightning::offers::refund::Refund + pub fn metadata(&self) -> Option> { + self.inner.metadata().cloned() + } + + /// The quantity of items requested or refunded for. + /// + /// From [`InvoiceRequest::quantity`] or [`Refund::quantity`]. + /// + /// [`Refund::quantity`]: lightning::offers::refund::Refund::quantity + pub fn quantity(&self) -> Option { + self.inner.quantity() + } + + /// Hash that was used for signing the invoice. + pub fn signable_hash(&self) -> Vec { + self.inner.signable_hash().to_vec() + } + + /// A possibly transient pubkey used to sign the invoice request or to send an invoice for a + /// refund in case there are no [`message_paths`]. + /// + /// [`message_paths`]: lightning::offers::invoice::Bolt12Invoice + pub fn payer_signing_pubkey(&self) -> PublicKey { + self.inner.payer_signing_pubkey() + } + + /// The public key used by the recipient to sign invoices. + /// + /// From [`Offer::issuer_signing_pubkey`] and may be `None`; also `None` if the invoice was + /// created in response to a [`Refund`]. + /// + /// [`Offer::issuer_signing_pubkey`]: lightning::offers::offer::Offer::issuer_signing_pubkey + /// [`Refund`]: lightning::offers::refund::Refund + pub fn issuer_signing_pubkey(&self) -> Option { + self.inner.issuer_signing_pubkey() + } + + /// The chain that must be used when paying the invoice; selected from [`offer_chains`] if the + /// invoice originated from an offer. + /// + /// From [`InvoiceRequest::chain`] or [`Refund::chain`]. 
+ /// + /// [`offer_chains`]: lightning::offers::invoice::Bolt12Invoice::offer_chains + /// [`InvoiceRequest::chain`]: lightning::offers::invoice_request::InvoiceRequest::chain + /// [`Refund::chain`]: lightning::offers::refund::Refund::chain + pub fn chain(&self) -> Vec { + self.inner.chain().to_bytes().to_vec() + } + + /// The chains that may be used when paying a requested invoice. + /// + /// From [`Offer::chains`]; `None` if the invoice was created in response to a [`Refund`]. + /// + /// [`Offer::chains`]: lightning::offers::offer::Offer::chains + /// [`Refund`]: lightning::offers::refund::Refund + pub fn offer_chains(&self) -> Option>> { + self.inner + .offer_chains() + .map(|chains| chains.iter().map(|chain| chain.to_bytes().to_vec()).collect()) + } + + /// Fallback addresses for paying the invoice on-chain, in order of most-preferred to + /// least-preferred. + pub fn fallback_addresses(&self) -> Vec
{ + self.inner.fallbacks() + } + + /// Writes `self` out to a `Vec`. + pub fn encode(&self) -> Vec { + self.inner.encode() + } +} + +impl std::str::FromStr for Bolt12Invoice { + type Err = Error; + + fn from_str(invoice_str: &str) -> Result { + if let Some(bytes_vec) = hex_utils::to_vec(invoice_str) { + if let Ok(invoice) = LdkBolt12Invoice::try_from(bytes_vec) { + return Ok(Bolt12Invoice { inner: invoice }); } } - Err(Error::InvalidInvoice.into()) + Err(Error::InvalidInvoice) } +} - fn from_custom(obj: Self) -> Self::Builtin { - hex_utils::to_string(&obj.encode()) +impl From for Bolt12Invoice { + fn from(invoice: LdkBolt12Invoice) -> Self { + Bolt12Invoice { inner: invoice } + } +} + +impl Deref for Bolt12Invoice { + type Target = LdkBolt12Invoice; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef for Bolt12Invoice { + fn as_ref(&self) -> &LdkBolt12Invoice { + self.deref() } } @@ -932,7 +1130,7 @@ mod tests { refund::RefundBuilder, }; - fn create_test_invoice() -> (LdkBolt11Invoice, Bolt11Invoice) { + fn create_test_bolt11_invoice() -> (LdkBolt11Invoice, Bolt11Invoice) { let invoice_string = "lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; let ldk_invoice: LdkBolt11Invoice = invoice_string.parse().unwrap(); let wrapped_invoice = Bolt11Invoice::from(ldk_invoice.clone()); @@ -991,6 +1189,19 @@ mod tests { (ldk_refund, wrapped_refund) } + fn create_test_bolt12_invoice() -> (LdkBolt12Invoice, Bolt12Invoice) { + let invoice_hex = 
"0020a5b7104b95f17442d6638143ded62b02c2fda98cdf35841713fd0f44b59286560a000e04682cb028502006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f520227105601015821034b4f0765a115caeff6787a8fb2d976c02467a36aea32901539d76473817937c65904546573745a9c00000068000001000003e75203dee4b3e5d48650caf1faadda53ac6e0dc3f509cc5e9c46defb8aeeec14010348e84fab39b226b1e0696cb6fb40bdb293952c184cf02007fa6e983cd311d189004e7bd75ff9ef069642f2abfa5916099e5a16144e1a6d9b4f246624d3b57d2895d5d2e46fe8661e49717d1663ad2c07b023738370a3e44a960f683040b1862fe36e22347c2dbe429c51af377bdbe01ca0e103f295d1678c68b628957a53a820afcc25763cc67b38aca82067bdf52dc68c061a02575d91c01beca64cc09735c395e91d034841d3e61b58948da631192ce556b85b01028e2284ead4ce184981f4d0f387f8d47295d4fa1dab6a6ae3a417550ac1c8b1aa007b38c926212fbf23154c6ff707621d6eedafc4298b133111d90934bb9d5a2103f0c8e4a3f3daa992334aad300677f23b4285db2ee5caf0a0ecc39c6596c3c4e42318040bec46add3626501f6e422be9c791adc81ea5c83ff0bfa91b7d42bcac0ed128a640fe970da584cff80fd5c12a8ea9b546a2d63515343a933daa21c0000000000000000001800000000000000011d24b2dfac5200000000a404682ca218a820a4a878fb352e63673c05eb07e53563fc8022ff039ad4c66e65848a7cde7ee780aa022710ae03020000b02103800fd75bf6b1e7c5f3fab33a372f6599730e0fae7a30fa4e5c8fbc69c3a87981f0403c9a40e6c9d08e12b0a155101d23a170b4f5b38051b0a0a09a794ce49e820f65d50c8fad7518200d3a28331aa5c668a8f7d70206aaf8bea2e8f05f0904b6e033"; + + let invoice_bytes = hex_utils::to_vec(invoice_hex).expect("Valid hex string"); + + let ldk_invoice = + LdkBolt12Invoice::try_from(invoice_bytes).expect("Valid Bolt12Invoice bytes"); + + let wrapped_invoice = Bolt12Invoice { inner: ldk_invoice.clone() }; + + (ldk_invoice, wrapped_invoice) + } + #[test] fn test_invoice_description_conversion() { let hash = "09d08d4865e8af9266f6cc7c0ae23a1d6bf868207cf8f7c5979b9f6ed850dfb0".to_string(); @@ -1003,7 +1214,7 @@ mod tests { #[test] fn test_bolt11_invoice_basic_properties() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); + let (ldk_invoice, 
wrapped_invoice) = create_test_bolt11_invoice(); assert_eq!( ldk_invoice.payment_hash().to_string(), @@ -1029,7 +1240,7 @@ mod tests { #[test] fn test_bolt11_invoice_time_related_fields() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); assert_eq!(ldk_invoice.expiry_time().as_secs(), wrapped_invoice.expiry_time_seconds()); assert_eq!( @@ -1048,7 +1259,7 @@ mod tests { #[test] fn test_bolt11_invoice_description() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); let ldk_description = ldk_invoice.description(); let wrapped_description = wrapped_invoice.description(); @@ -1072,7 +1283,7 @@ mod tests { #[test] fn test_bolt11_invoice_route_hints() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); let wrapped_route_hints = wrapped_invoice.route_hints(); let ldk_route_hints = ldk_invoice.route_hints(); @@ -1091,7 +1302,7 @@ mod tests { #[test] fn test_bolt11_invoice_roundtrip() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); let invoice_str = wrapped_invoice.to_string(); let parsed_invoice: LdkBolt11Invoice = invoice_str.parse().unwrap(); @@ -1284,4 +1495,130 @@ mod tests { assert_eq!(ldk_refund.payer_note().map(|p| p.to_string()), wrapped_refund.payer_note()); } + + #[test] + fn test_bolt12_invoice_properties() { + let (ldk_invoice, wrapped_invoice) = create_test_bolt12_invoice(); + + assert_eq!( + ldk_invoice.payment_hash().0.to_vec(), + wrapped_invoice.payment_hash().0.to_vec() + ); + assert_eq!(ldk_invoice.amount_msats(), wrapped_invoice.amount_msats()); + assert_eq!(ldk_invoice.is_expired(), wrapped_invoice.is_expired()); + + assert_eq!(ldk_invoice.signing_pubkey(), wrapped_invoice.signing_pubkey()); + + 
assert_eq!(ldk_invoice.created_at().as_secs(), wrapped_invoice.created_at()); + + match (ldk_invoice.absolute_expiry(), wrapped_invoice.absolute_expiry_seconds()) { + (Some(ldk_expiry), Some(wrapped_expiry)) => { + assert_eq!(ldk_expiry.as_secs(), wrapped_expiry); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had an absolute expiry but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had an absolute expiry but LDK invoice did not!"); + }, + } + + assert_eq!(ldk_invoice.relative_expiry().as_secs(), wrapped_invoice.relative_expiry()); + + match (ldk_invoice.description(), wrapped_invoice.description()) { + (Some(ldk_desc), Some(wrapped_desc)) => { + assert_eq!(ldk_desc.to_string(), wrapped_desc); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had a description but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had a description but LDK invoice did not!"); + }, + } + + match (ldk_invoice.issuer(), wrapped_invoice.issuer()) { + (Some(ldk_issuer), Some(wrapped_issuer)) => { + assert_eq!(ldk_issuer.to_string(), wrapped_issuer); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had an issuer but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had an issuer but LDK invoice did not!"); + }, + } + + match (ldk_invoice.payer_note(), wrapped_invoice.payer_note()) { + (Some(ldk_note), Some(wrapped_note)) => { + assert_eq!(ldk_note.to_string(), wrapped_note); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had a payer note but wrapped invoice did not!"); + }, + (None, Some(_)) => { + 
panic!("Wrapped invoice had a payer note but LDK invoice did not!"); + }, + } + + match (ldk_invoice.metadata(), wrapped_invoice.metadata()) { + (Some(ldk_metadata), Some(wrapped_metadata)) => { + assert_eq!(ldk_metadata.as_slice(), wrapped_metadata.as_slice()); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had metadata but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had metadata but LDK invoice did not!"); + }, + } + + assert_eq!(ldk_invoice.quantity(), wrapped_invoice.quantity()); + + assert_eq!(ldk_invoice.chain().to_bytes().to_vec(), wrapped_invoice.chain()); + + match (ldk_invoice.offer_chains(), wrapped_invoice.offer_chains()) { + (Some(ldk_chains), Some(wrapped_chains)) => { + assert_eq!(ldk_chains.len(), wrapped_chains.len()); + for (i, ldk_chain) in ldk_chains.iter().enumerate() { + assert_eq!(ldk_chain.to_bytes().to_vec(), wrapped_chains[i]); + } + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had offer chains but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had offer chains but LDK invoice did not!"); + }, + } + + let ldk_fallbacks = ldk_invoice.fallbacks(); + let wrapped_fallbacks = wrapped_invoice.fallback_addresses(); + assert_eq!(ldk_fallbacks.len(), wrapped_fallbacks.len()); + for (i, ldk_fallback) in ldk_fallbacks.iter().enumerate() { + assert_eq!(*ldk_fallback, wrapped_fallbacks[i]); + } + + assert_eq!(ldk_invoice.encode(), wrapped_invoice.encode()); + + assert_eq!(ldk_invoice.signable_hash().to_vec(), wrapped_invoice.signable_hash()); + } } diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 74c3ac45f..b9efa3241 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -17,7 +17,6 @@ use crate::payment::store::{PaymentDetails, PaymentDirection, 
PaymentKind, Payme use crate::types::{ChannelManager, PaymentStore}; use lightning::ln::channelmanager::{PaymentId, Retry}; -use lightning::offers::invoice::Bolt12Invoice; use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::util::string::UntrustedString; @@ -28,6 +27,11 @@ use std::num::NonZeroU64; use std::sync::{Arc, RwLock}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; +#[cfg(not(feature = "uniffi"))] +type Bolt12Invoice = lightning::offers::invoice::Bolt12Invoice; +#[cfg(feature = "uniffi")] +type Bolt12Invoice = Arc; + #[cfg(not(feature = "uniffi"))] type Offer = LdkOffer; #[cfg(feature = "uniffi")] @@ -340,6 +344,7 @@ impl Bolt12Payment { /// retrieve the refund). /// /// [`Refund`]: lightning::offers::refund::Refund + /// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice pub fn request_refund_payment(&self, refund: &Refund) -> Result { let refund = maybe_deref(refund); let invoice = self.channel_manager.request_refund_payment(&refund).map_err(|e| { @@ -369,7 +374,7 @@ impl Bolt12Payment { self.payment_store.insert(payment)?; - Ok(invoice) + Ok(maybe_wrap(invoice)) } /// Returns a [`Refund`] object that can be used to offer a refund payment of the amount given. From 520e5aa05f686b5ffd365f1769d96bbe26a0b08c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 10 Jun 2025 13:37:02 +0200 Subject: [PATCH 009/184] Export trait impls in FFI Previously, we moved from a `String` representation to a 'full' `Bolt11Invoice` type. However, we forgot to expose the `Display` implementation in the FFI, leaving no way to retrieve the invoice string. Here, we fix this oversight, and also make a few related changes.
--- bindings/ldk_node.udl | 1 + src/ffi/types.rs | 2 +- src/payment/unified_qr.rs | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 505f0db8d..e914fd00e 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -715,6 +715,7 @@ dictionary RouteHintHop { RoutingFees fees; }; +[Traits=(Debug, Display, Eq)] interface Bolt11Invoice { [Throws=NodeError, Name=from_str] constructor([ByRef] string invoice_str); diff --git a/src/ffi/types.rs b/src/ffi/types.rs index bbf730211..d35f2aa2e 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -926,7 +926,7 @@ impl From for RouteHintHop { /// Represents a syntactically and semantically correct lightning BOLT11 invoice. #[derive(Debug, Clone, PartialEq, Eq)] pub struct Bolt11Invoice { - pub inner: LdkBolt11Invoice, + pub(crate) inner: LdkBolt11Invoice, } impl Bolt11Invoice { diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index 125e1d09b..af5ee1c7b 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -188,6 +188,7 @@ impl UnifiedQrPayment { /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki /// [`PaymentId`]: lightning::ln::channelmanager::PaymentId /// [`Txid`]: bitcoin::hash_types::Txid +#[derive(Debug)] pub enum QrPaymentResult { /// An on-chain payment. Onchain { From eff4d77962c652595f5b4d4a80e6e59bbc7c5ada Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 17 Jun 2025 15:15:43 +0200 Subject: [PATCH 010/184] Avoid collision in `Bolt11Invoice::description` When exporting `Display` to bindings, Swift will add a `description` method to the respective object. Unfortunately, this collides with `Bolt11Invoice::description`, which we therefore rename to `Bolt11Invoice::invoice_description`. 
--- bindings/ldk_node.udl | 2 +- src/ffi/types.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index e914fd00e..36767b790 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -728,7 +728,7 @@ interface Bolt11Invoice { u64 seconds_until_expiry(); boolean is_expired(); boolean would_expire(u64 at_time_seconds); - Bolt11InvoiceDescription description(); + Bolt11InvoiceDescription invoice_description(); u64 min_final_cltv_expiry_delta(); Network network(); Currency currency(); diff --git a/src/ffi/types.rs b/src/ffi/types.rs index d35f2aa2e..c65bb0599 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -985,7 +985,7 @@ impl Bolt11Invoice { } /// Return the description or a hash of it for longer ones - pub fn description(&self) -> Bolt11InvoiceDescription { + pub fn invoice_description(&self) -> Bolt11InvoiceDescription { self.inner.description().into() } From 9a8254d937a9f3b24126b3527645744713323960 Mon Sep 17 00:00:00 2001 From: Andrei Date: Thu, 19 Jun 2025 00:00:00 +0000 Subject: [PATCH 011/184] Log background sync of RGS message with info level Other messages like on-chain wallet sync or fee rate cache update logged with info level --- src/lib.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index e80ca964d..b09f9a9f7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -146,7 +146,9 @@ use types::{ }; pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, UserChannelId}; -use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; +#[cfg(tokio_unstable)] +use logger::log_trace; +use logger::{log_debug, log_error, log_info, LdkLogger, Logger}; use lightning::chain::BestBlock; use lightning::events::bump_transaction::Wallet as LdkWallet; @@ -285,7 +287,7 @@ impl Node { let now = Instant::now(); match gossip_source.update_rgs_snapshot().await { Ok(updated_timestamp) => { - log_trace!( + log_info!( gossip_sync_logger, 
"Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() From 6b9751bd140606e5780224fa3ec8e9dea523aa08 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 19 Jun 2025 14:06:43 +0200 Subject: [PATCH 012/184] Fix test code after `Bolt11Invoice::invoice_description` rename .. which we forgot when we made the change. --- src/ffi/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ffi/types.rs b/src/ffi/types.rs index c65bb0599..984e4da8f 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -1262,7 +1262,7 @@ mod tests { let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); let ldk_description = ldk_invoice.description(); - let wrapped_description = wrapped_invoice.description(); + let wrapped_description = wrapped_invoice.invoice_description(); match (ldk_description, &wrapped_description) { ( From efa5b735de912e2a3c06d2474ca5186701e5c07c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 19 Jun 2025 13:09:58 +0200 Subject: [PATCH 013/184] Update `CHANGELOG.md` for v0.6.1 --- CHANGELOG.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29b6f748c..fe613a07b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,21 @@ +# 0.6.1 - Jun. 19, 2025 +This patch release fixes minor issues with the recently-exposed `Bolt11Invoice` +type in bindings. + +## Feature and API updates +- The `Bolt11Invoice::description` method is now exposed as + `Bolt11Invoice::invoice_description` in bindings, to avoid collisions with a + Swift standard method of same name (#576) + +## Bug Fixes and Improvements +- The `Display` implementation of `Bolt11Invoice` is now exposed in bindings, + (re-)allowing to render the invoice as a string. (#574) + +In total, this release features 9 files changed, 549 insertions, 83 deletions, +in 8 commits from 1 author in alphabetical order: + +- Elias Rohrer + # 0.6.0 - Jun. 
9, 2025 This sixth minor release mainly fixes an issue that could have left the on-chain wallet unable to spend funds if transactions that had previously been From ec2b24d664b7a3186418526e3062d53d0b88a1f7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 19 Jun 2025 13:25:13 +0200 Subject: [PATCH 014/184] Update Swift files for v0.6.1 --- Package.swift | 4 +- bindings/swift/Sources/LDKNode/LDKNode.swift | 52 +++++++++++++++----- 2 files changed, 42 insertions(+), 14 deletions(-) diff --git a/Package.swift b/Package.swift index 33c5a70b8..78a38f294 100644 --- a/Package.swift +++ b/Package.swift @@ -3,8 +3,8 @@ import PackageDescription -let tag = "v0.6.0" -let checksum = "8bda396624134e0b592bfcc2f977b9aa5ce8c2ee359c032ae3520869ece8851c" +let tag = "v0.6.1" +let checksum = "73f53b615d5bfdf76f2e7233bde17a2a62631292ce506763a7150344230859c8" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( diff --git a/bindings/swift/Sources/LDKNode/LDKNode.swift b/bindings/swift/Sources/LDKNode/LDKNode.swift index 442201d31..20ad658d7 100644 --- a/bindings/swift/Sources/LDKNode/LDKNode.swift +++ b/bindings/swift/Sources/LDKNode/LDKNode.swift @@ -517,12 +517,12 @@ public protocol Bolt11InvoiceProtocol : AnyObject { func currency() -> Currency - func description() -> Bolt11InvoiceDescription - func expiryTimeSeconds() -> UInt64 func fallbackAddresses() -> [Address] + func invoiceDescription() -> Bolt11InvoiceDescription + func isExpired() -> Bool func minFinalCltvExpiryDelta() -> UInt64 @@ -548,6 +548,9 @@ public protocol Bolt11InvoiceProtocol : AnyObject { } open class Bolt11Invoice: + CustomDebugStringConvertible, + CustomStringConvertible, + Equatable, Bolt11InvoiceProtocol { fileprivate let pointer: UnsafeMutableRawPointer! @@ -610,13 +613,6 @@ open func currency() -> Currency { }) } -open func description() -> Bolt11InvoiceDescription { - return try! 
FfiConverterTypeBolt11InvoiceDescription.lift(try! rustCall() { - uniffi_ldk_node_fn_method_bolt11invoice_description(self.uniffiClonePointer(),$0 - ) -}) -} - open func expiryTimeSeconds() -> UInt64 { return try! FfiConverterUInt64.lift(try! rustCall() { uniffi_ldk_node_fn_method_bolt11invoice_expiry_time_seconds(self.uniffiClonePointer(),$0 @@ -631,6 +627,13 @@ open func fallbackAddresses() -> [Address] { }) } +open func invoiceDescription() -> Bolt11InvoiceDescription { + return try! FfiConverterTypeBolt11InvoiceDescription.lift(try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_invoice_description(self.uniffiClonePointer(),$0 + ) +}) +} + open func isExpired() -> Bool { return try! FfiConverterBool.lift(try! rustCall() { uniffi_ldk_node_fn_method_bolt11invoice_is_expired(self.uniffiClonePointer(),$0 @@ -709,6 +712,31 @@ open func wouldExpire(atTimeSeconds: UInt64) -> Bool { }) } + open var debugDescription: String { + return try! FfiConverterString.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_uniffi_trait_debug(self.uniffiClonePointer(),$0 + ) +} + ) + } + open var description: String { + return try! FfiConverterString.lift( + try! rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_uniffi_trait_display(self.uniffiClonePointer(),$0 + ) +} + ) + } + public static func == (self: Bolt11Invoice, other: Bolt11Invoice) -> Bool { + return try! FfiConverterBool.lift( + try! 
rustCall() { + uniffi_ldk_node_fn_method_bolt11invoice_uniffi_trait_eq_eq(self.uniffiClonePointer(), + FfiConverterTypeBolt11Invoice.lower(other),$0 + ) +} + ) + } } @@ -9451,15 +9479,15 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_bolt11invoice_currency() != 32179) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11invoice_description() != 9887) { - return InitializationResult.apiChecksumMismatch - } if (uniffi_ldk_node_checksum_method_bolt11invoice_expiry_time_seconds() != 23625) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt11invoice_fallback_addresses() != 55276) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_bolt11invoice_invoice_description() != 395) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_bolt11invoice_is_expired() != 15932) { return InitializationResult.apiChecksumMismatch } From 1199f7d47d42ced6938ddbaf534db09ba7862c46 Mon Sep 17 00:00:00 2001 From: Andrei Date: Fri, 27 Jun 2025 00:00:00 +0000 Subject: [PATCH 015/184] Set log target to module path The `log!()` macro defaults to using the module path as the log target when no target is specified. Explicitly setting the log target to the module path enables better integration with other logging crates, such as `env_logger`, allowing them to filter logs by target (e.g., via `env_logger::Builder::parse_filters()`). 
--- src/logger.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/logger.rs b/src/logger.rs index d357f018d..bbd24ec20 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -153,6 +153,7 @@ impl LogWriter for Writer { #[cfg(not(feature = "uniffi"))] log::logger().log( &builder + .target(record.module_path) .module_path(Some(record.module_path)) .line(Some(record.line)) .args(format_args!("{}", record.args)) @@ -161,6 +162,7 @@ impl LogWriter for Writer { #[cfg(feature = "uniffi")] log::logger().log( &builder + .target(&record.module_path) .module_path(Some(&record.module_path)) .line(Some(record.line)) .args(format_args!("{}", record.args)) From 3954355a7535811378be8d386d4dad2e18add8db Mon Sep 17 00:00:00 2001 From: tosynthegeek Date: Mon, 30 Jun 2025 09:04:34 +0100 Subject: [PATCH 016/184] Add exponential backoff for sync failures Previously, chain synchronization failures would retry immediately without any delay, which could lead to tight retry loops and high CPU usage during failures. This change introduces exponential backoff for transient errors, starting at 2 seconds and doubling each time up to a maximum of 300 seconds. Persistent errors also now delay retries by the maximum backoff duration to prevent rapid loops while maintaining eventual recovery. 
Fixes #587 --- src/chain/mod.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index fac8b0e6c..df10ecac2 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -36,7 +36,7 @@ use lightning_transaction_sync::EsploraSyncClient; use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; -use lightning_block_sync::SpvClient; +use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; use bdk_esplora::EsploraAsyncExt; use bdk_wallet::Update as BdkUpdate; @@ -425,6 +425,9 @@ impl ChainSource { "Starting initial synchronization of chain listeners. This might take a while..", ); + let mut backoff = CHAIN_POLLING_INTERVAL_SECS; + const MAX_BACKOFF_SECS: u64 = 300; + loop { let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; @@ -504,8 +507,24 @@ impl ChainSource { Err(e) => { log_error!(logger, "Failed to synchronize chain listeners: {:?}", e); - tokio::time::sleep(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)) - .await; + if e.kind() == BlockSourceErrorKind::Transient { + log_info!( + logger, + "Transient error syncing chain listeners: {:?}. Retrying in {} seconds.", + e, + backoff + ); + tokio::time::sleep(Duration::from_secs(backoff)).await; + backoff = std::cmp::min(backoff * 2, MAX_BACKOFF_SECS); + } else { + log_error!( + logger, + "Persistent error syncing chain listeners: {:?}. 
Retrying in {} seconds.", + e, + MAX_BACKOFF_SECS + ); + tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)).await; + } }, } } From 948f289d7756c98d21f43682c193852c039bd2b9 Mon Sep 17 00:00:00 2001 From: moisesPomilio <93723302+moisesPompilio@users.noreply.github.com> Date: Mon, 30 Jun 2025 21:52:19 -0300 Subject: [PATCH 017/184] Fix CLN crash by waiting for block height sync before channel open (#527) --- tests/integration_tests_cln.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index b6300576c..f77311fb2 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -64,7 +64,17 @@ fn test_cln() { // Setup CLN let sock = "/tmp/lightning-rpc"; let cln_client = LightningRPC::new(&sock); - let cln_info = cln_client.getinfo().unwrap(); + let cln_info = { + loop { + let info = cln_client.getinfo().unwrap(); + // Wait for CLN to sync block height before channel open. + // Prevents crash due to unset blockheight (see LDK Node issue #527). + if info.blockheight > 0 { + break info; + } + std::thread::sleep(std::time::Duration::from_millis(250)); + } + }; let cln_node_id = PublicKey::from_str(&cln_info.id).unwrap(); let cln_address: SocketAddress = match cln_info.binding.first().unwrap() { NetworkAddress::Ipv4 { address, port } => { From c1f27ef4f22b390138d5a4cf30a5f7aa218eee91 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Thu, 19 Jun 2025 18:00:00 +0100 Subject: [PATCH 018/184] prefactor: rename bitcoind_rpc to bitcoind Since we unify RPC and REST sync clients in the following commit, we rename bitcoind_rpc to bitcoind as this appropriately captures the unified client. 
--- src/chain/{bitcoind_rpc.rs => bitcoind.rs} | 0 src/chain/mod.rs | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename src/chain/{bitcoind_rpc.rs => bitcoind.rs} (100%) diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind.rs similarity index 100% rename from src/chain/bitcoind_rpc.rs rename to src/chain/bitcoind.rs diff --git a/src/chain/mod.rs b/src/chain/mod.rs index df10ecac2..4d91ffe86 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -5,10 +5,10 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -mod bitcoind_rpc; +mod bitcoind; mod electrum; -use crate::chain::bitcoind_rpc::{ +use crate::chain::bitcoind::{ BitcoindRpcClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, }; use crate::chain::electrum::ElectrumRuntimeClient; From b7de5f82e087b27274c02988fd8364e8a778b0d0 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Mon, 30 Jun 2025 23:08:27 +0100 Subject: [PATCH 019/184] prefactor: prepare client for REST interface support In the next commit, we'll introduce a unified bitcoind client with two variants - RPC and REST. In preparation for supporting chain syncing via the REST interface, we refactor some methods into helper functions specific to the interface client.
--- src/chain/bitcoind.rs | 108 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 89 insertions(+), 19 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 3ca2c221f..bcef474cf 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -54,17 +54,31 @@ impl BitcoindRpcClient { } pub(crate) async fn broadcast_transaction(&self, tx: &Transaction) -> std::io::Result { + Self::broadcast_transaction_inner(self.rpc_client(), tx).await + } + + async fn broadcast_transaction_inner( + rpc_client: Arc, tx: &Transaction, + ) -> std::io::Result { let tx_serialized = bitcoin::consensus::encode::serialize_hex(tx); let tx_json = serde_json::json!(tx_serialized); - self.rpc_client.call_method::("sendrawtransaction", &[tx_json]).await + rpc_client.call_method::("sendrawtransaction", &[tx_json]).await } pub(crate) async fn get_fee_estimate_for_target( &self, num_blocks: usize, estimation_mode: FeeRateEstimationMode, + ) -> std::io::Result { + Self::get_fee_estimate_for_target_inner(self.rpc_client(), num_blocks, estimation_mode) + .await + } + + /// Estimate the fee rate for the provided target number of blocks. + async fn get_fee_estimate_for_target_inner( + rpc_client: Arc, num_blocks: usize, estimation_mode: FeeRateEstimationMode, ) -> std::io::Result { let num_blocks_json = serde_json::json!(num_blocks); let estimation_mode_json = serde_json::json!(estimation_mode); - self.rpc_client + rpc_client .call_method::( "estimatesmartfee", &[num_blocks_json, estimation_mode_json], @@ -74,7 +88,14 @@ impl BitcoindRpcClient { } pub(crate) async fn get_mempool_minimum_fee_rate(&self) -> std::io::Result { - self.rpc_client + Self::get_mempool_minimum_fee_rate_rpc(self.rpc_client()).await + } + + /// Get the minimum mempool fee rate via RPC interface. 
+ async fn get_mempool_minimum_fee_rate_rpc( + rpc_client: Arc, + ) -> std::io::Result { + rpc_client .call_method::("getmempoolinfo", &[]) .await .map(|resp| resp.0) @@ -82,11 +103,17 @@ impl BitcoindRpcClient { pub(crate) async fn get_raw_transaction( &self, txid: &Txid, + ) -> std::io::Result> { + Self::get_raw_transaction_rpc(self.rpc_client(), txid).await + } + + /// Retrieve raw transaction for provided transaction ID via the RPC interface. + async fn get_raw_transaction_rpc( + rpc_client: Arc, txid: &Txid, ) -> std::io::Result> { let txid_hex = bitcoin::consensus::encode::serialize_hex(txid); let txid_json = serde_json::json!(txid_hex); - match self - .rpc_client + match rpc_client .call_method::("getrawtransaction", &[txid_json]) .await { @@ -119,8 +146,13 @@ impl BitcoindRpcClient { } pub(crate) async fn get_raw_mempool(&self) -> std::io::Result> { + Self::get_raw_mempool_rpc(self.rpc_client()).await + } + + /// Retrieves the raw mempool via the RPC interface. + async fn get_raw_mempool_rpc(rpc_client: Arc) -> std::io::Result> { let verbose_flag_json = serde_json::json!(false); - self.rpc_client + rpc_client .call_method::("getrawmempool", &[verbose_flag_json]) .await .map(|resp| resp.0) @@ -128,15 +160,19 @@ impl BitcoindRpcClient { pub(crate) async fn get_mempool_entry( &self, txid: Txid, + ) -> std::io::Result> { + Self::get_mempool_entry_inner(self.rpc_client(), txid).await + } + + /// Retrieves the mempool entry of the provided transaction ID. 
+ async fn get_mempool_entry_inner( + client: Arc, txid: Txid, ) -> std::io::Result> { let txid_hex = bitcoin::consensus::encode::serialize_hex(&txid); let txid_json = serde_json::json!(txid_hex); - match self - .rpc_client - .call_method::("getmempoolentry", &[txid_json]) - .await - { - Ok(resp) => Ok(Some(MempoolEntry { txid, height: resp.height, time: resp.time })), + + match client.call_method::("getmempoolentry", &[txid_json]).await { + Ok(resp) => Ok(Some(MempoolEntry { txid, time: resp.time, height: resp.height })), Err(e) => match e.into_inner() { Some(inner) => { let rpc_error_res: Result, _> = inner.downcast(); @@ -165,9 +201,15 @@ impl BitcoindRpcClient { } pub(crate) async fn update_mempool_entries_cache(&self) -> std::io::Result<()> { + self.update_mempool_entries_cache_inner(&self.mempool_entries_cache).await + } + + async fn update_mempool_entries_cache_inner( + &self, mempool_entries_cache: &tokio::sync::Mutex>, + ) -> std::io::Result<()> { let mempool_txids = self.get_raw_mempool().await?; - let mut mempool_entries_cache = self.mempool_entries_cache.lock().await; + let mut mempool_entries_cache = mempool_entries_cache.lock().await; mempool_entries_cache.retain(|txid, _| mempool_txids.contains(txid)); if let Some(difference) = mempool_txids.len().checked_sub(mempool_entries_cache.capacity()) @@ -210,13 +252,28 @@ impl BitcoindRpcClient { async fn get_mempool_transactions_and_timestamp_at_height( &self, best_processed_height: u32, ) -> std::io::Result> { - let prev_mempool_time = self.latest_mempool_timestamp.load(Ordering::Relaxed); + self.get_mempool_transactions_and_timestamp_at_height_inner( + &self.latest_mempool_timestamp, + &self.mempool_entries_cache, + &self.mempool_txs_cache, + best_processed_height, + ) + .await + } + + async fn get_mempool_transactions_and_timestamp_at_height_inner( + &self, latest_mempool_timestamp: &AtomicU64, + mempool_entries_cache: &tokio::sync::Mutex>, + mempool_txs_cache: &tokio::sync::Mutex>, + 
best_processed_height: u32, + ) -> std::io::Result> { + let prev_mempool_time = latest_mempool_timestamp.load(Ordering::Relaxed); let mut latest_time = prev_mempool_time; self.update_mempool_entries_cache().await?; - let mempool_entries_cache = self.mempool_entries_cache.lock().await; - let mut mempool_txs_cache = self.mempool_txs_cache.lock().await; + let mempool_entries_cache = mempool_entries_cache.lock().await; + let mut mempool_txs_cache = mempool_txs_cache.lock().await; mempool_txs_cache.retain(|txid, _| mempool_entries_cache.contains_key(txid)); if let Some(difference) = @@ -260,7 +317,7 @@ impl BitcoindRpcClient { } if !txs_to_emit.is_empty() { - self.latest_mempool_timestamp.store(latest_time, Ordering::Release); + latest_mempool_timestamp.store(latest_time, Ordering::Release); } Ok(txs_to_emit) } @@ -272,8 +329,21 @@ impl BitcoindRpcClient { async fn get_evicted_mempool_txids_and_timestamp( &self, unconfirmed_txids: Vec, ) -> std::io::Result> { - let latest_mempool_timestamp = self.latest_mempool_timestamp.load(Ordering::Relaxed); - let mempool_entries_cache = self.mempool_entries_cache.lock().await; + self.get_evicted_mempool_txids_and_timestamp_inner( + &self.latest_mempool_timestamp, + &self.mempool_entries_cache, + unconfirmed_txids, + ) + .await + } + + async fn get_evicted_mempool_txids_and_timestamp_inner( + &self, latest_mempool_timestamp: &AtomicU64, + mempool_entries_cache: &tokio::sync::Mutex>, + unconfirmed_txids: Vec, + ) -> std::io::Result> { + let latest_mempool_timestamp = latest_mempool_timestamp.load(Ordering::Relaxed); + let mempool_entries_cache = mempool_entries_cache.lock().await; let evicted_txids = unconfirmed_txids .into_iter() .filter(|txid| mempool_entries_cache.contains_key(txid)) From 36e4c7f5e10a564c71ca861e8d2b38013c6db244 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Tue, 1 Jul 2025 01:49:36 +0100 Subject: [PATCH 020/184] feat: support chain sourcing via REST interface This commit: - Adds support for syncing data via Bitcoin 
Core's REST interface. - Unifies the REST and RPC Bitcoin Core API clients --- Cargo.toml | 2 +- bindings/ldk_node.udl | 1 + docker-compose.yml | 5 +- src/builder.rs | 126 +++++++++-- src/chain/bitcoind.rs | 369 +++++++++++++++++++++++++++----- src/chain/mod.rs | 118 ++++++---- src/config.rs | 9 + src/lib.rs | 2 +- tests/common/mod.rs | 23 +- tests/integration_tests_rust.rs | 12 +- 10 files changed, 553 insertions(+), 114 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index bf8bed08c..96e47b260 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,7 @@ lightning-net-tokio = { version = "0.1.0" } lightning-persister = { version = "0.1.0" } lightning-background-processor = { version = "0.1.0", features = ["futures"] } lightning-rapid-gossip-sync = { version = "0.1.0" } -lightning-block-sync = { version = "0.1.0", features = ["rpc-client", "tokio"] } +lightning-block-sync = { version = "0.1.0", features = ["rpc-client", "rest-client", "tokio"] } lightning-transaction-sync = { version = "0.1.0", features = ["esplora-async-https", "time", "electrum"] } lightning-liquidity = { version = "0.1.0", features = ["std"] } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 36767b790..3c240b43c 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -78,6 +78,7 @@ interface Builder { void set_chain_source_esplora(string server_url, EsploraSyncConfig? config); void set_chain_source_electrum(string server_url, ElectrumSyncConfig? config); void set_chain_source_bitcoind_rpc(string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); + void set_chain_source_bitcoind_rest(string rest_host, u16 rest_port, string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); void set_gossip_source_p2p(); void set_gossip_source_rgs(string rgs_server_url); void set_liquidity_source_lsps1(PublicKey node_id, SocketAddress address, string? 
token); diff --git a/docker-compose.yml b/docker-compose.yml index 425dc129a..e71fd70fb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,10 +13,11 @@ services: "-rpcbind=0.0.0.0", "-rpcuser=user", "-rpcpassword=pass", - "-fallbackfee=0.00001" + "-fallbackfee=0.00001", + "-rest" ] ports: - - "18443:18443" # Regtest RPC port + - "18443:18443" # Regtest REST and RPC port - "18444:18444" # Regtest P2P port networks: - bitcoin-electrs diff --git a/src/builder.rs b/src/builder.rs index 31a0fee45..a177768f6 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -7,8 +7,9 @@ use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL}; use crate::config::{ - default_user_config, may_announce_channel, AnnounceError, Config, ElectrumSyncConfig, - EsploraSyncConfig, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, + default_user_config, may_announce_channel, AnnounceError, BitcoindRestClientConfig, Config, + ElectrumSyncConfig, EsploraSyncConfig, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, + WALLET_KEYS_SEED_LEN, }; use crate::connection::ConnectionManager; @@ -84,9 +85,21 @@ const LSPS_HARDENED_CHILD_INDEX: u32 = 577; #[derive(Debug, Clone)] enum ChainDataSourceConfig { - Esplora { server_url: String, sync_config: Option }, - Electrum { server_url: String, sync_config: Option }, - BitcoindRpc { rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String }, + Esplora { + server_url: String, + sync_config: Option, + }, + Electrum { + server_url: String, + sync_config: Option, + }, + Bitcoind { + rpc_host: String, + rpc_port: u16, + rpc_user: String, + rpc_password: String, + rest_client_config: Option, + }, } #[derive(Debug, Clone)] @@ -299,13 +312,48 @@ impl NodeBuilder { self } - /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC - /// endpoint. + /// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC. 
+ /// + /// This method establishes an RPC connection that enables all essential chain operations including + /// transaction broadcasting and chain data synchronization. + /// + /// ## Parameters: + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection. pub fn set_chain_source_bitcoind_rpc( &mut self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) -> &mut Self { - self.chain_data_source_config = - Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }); + self.chain_data_source_config = Some(ChainDataSourceConfig::Bitcoind { + rpc_host, + rpc_port, + rpc_user, + rpc_password, + rest_client_config: None, + }); + self + } + + /// Configures the [`Node`] instance to synchronize chain data from a Bitcoin Core REST endpoint. + /// + /// This method enables chain data synchronization via Bitcoin Core's REST interface. We pass + /// additional RPC configuration to non-REST-supported API calls like transaction broadcasting. + /// + /// ## Parameters: + /// * `rest_host`, `rest_port` - Required parameters for the Bitcoin Core REST connection. + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection + pub fn set_chain_source_bitcoind_rest( + &mut self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, + rpc_user: String, rpc_password: String, + ) -> &mut Self { + self.chain_data_source_config = Some(ChainDataSourceConfig::Bitcoind { + rpc_host, + rpc_port, + rpc_user, + rpc_password, + rest_client_config: Some(BitcoindRestClientConfig { rest_host, rest_port }), + }); + self } @@ -716,8 +764,14 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_chain_source_electrum(server_url, sync_config); } - /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC - /// endpoint. 
+ /// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC. + /// + /// This method establishes an RPC connection that enables all essential chain operations including + /// transaction broadcasting and chain data synchronization. + /// + /// ## Parameters: + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection. pub fn set_chain_source_bitcoind_rpc( &self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) { @@ -729,6 +783,29 @@ impl ArcedNodeBuilder { ); } + /// Configures the [`Node`] instance to synchronize chain data from a Bitcoin Core REST endpoint. + /// + /// This method enables chain data synchronization via Bitcoin Core's REST interface. We pass + /// additional RPC configuration to non-REST-supported API calls like transaction broadcasting. + /// + /// ## Parameters: + /// * `rest_host`, `rest_port` - Required parameters for the Bitcoin Core REST connection. + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection + pub fn set_chain_source_bitcoind_rest( + &self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, + rpc_user: String, rpc_password: String, + ) { + self.inner.write().unwrap().set_chain_source_bitcoind_rest( + rest_host, + rest_port, + rpc_host, + rpc_port, + rpc_user, + rpc_password, + ); + } + /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer /// network. 
pub fn set_gossip_source_p2p(&self) { @@ -1068,8 +1145,14 @@ fn build_with_store_internal( Arc::clone(&node_metrics), )) }, - Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }) => { - Arc::new(ChainSource::new_bitcoind_rpc( + Some(ChainDataSourceConfig::Bitcoind { + rpc_host, + rpc_port, + rpc_user, + rpc_password, + rest_client_config, + }) => match rest_client_config { + Some(rest_client_config) => Arc::new(ChainSource::new_bitcoind_rest( rpc_host.clone(), *rpc_port, rpc_user.clone(), @@ -1079,10 +1162,25 @@ fn build_with_store_internal( Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), Arc::clone(&config), + rest_client_config.clone(), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + )), + None => Arc::new(ChainSource::new_bitcoind_rpc( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + Arc::clone(&logger), + Arc::clone(&node_metrics), + )), }, + None => { // Default to Esplora client. 
let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index bcef474cf..98e77cac7 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -7,11 +7,14 @@ use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; use lightning::chain::Listen; - -use lightning_block_sync::http::HttpEndpoint; -use lightning_block_sync::http::JsonResponse; +use lightning_block_sync::gossip::UtxoSource; +use lightning_block_sync::http::{HttpEndpoint, JsonResponse}; use lightning_block_sync::poll::ValidatedBlockHeader; +use lightning_block_sync::rest::RestClient; use lightning_block_sync::rpc::{RpcClient, RpcError}; use lightning_block_sync::{ AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, @@ -19,26 +22,31 @@ use lightning_block_sync::{ use serde::Serialize; -use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; - -use base64::prelude::{Engine, BASE64_STANDARD}; - use std::collections::{HashMap, VecDeque}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; -pub struct BitcoindRpcClient { - rpc_client: Arc, - latest_mempool_timestamp: AtomicU64, - mempool_entries_cache: tokio::sync::Mutex>, - mempool_txs_cache: tokio::sync::Mutex>, +pub enum BitcoindClient { + Rpc { + rpc_client: Arc, + latest_mempool_timestamp: AtomicU64, + mempool_entries_cache: tokio::sync::Mutex>, + mempool_txs_cache: tokio::sync::Mutex>, + }, + Rest { + rest_client: Arc, + rpc_client: Arc, + latest_mempool_timestamp: AtomicU64, + mempool_entries_cache: tokio::sync::Mutex>, + mempool_txs_cache: tokio::sync::Mutex>, + }, } -impl BitcoindRpcClient { - pub(crate) fn new(host: String, port: u16, rpc_user: String, rpc_password: String) -> Self { - let http_endpoint = HttpEndpoint::for_host(host.clone()).with_port(port); - let rpc_credentials = - BASE64_STANDARD.encode(format!("{}:{}", 
rpc_user.clone(), rpc_password.clone())); +impl BitcoindClient { + /// Creates a new RPC API client for the chain interactions with Bitcoin Core. + pub(crate) fn new_rpc(host: String, port: u16, rpc_user: String, rpc_password: String) -> Self { + let http_endpoint = endpoint(host, port); + let rpc_credentials = rpc_credentials(rpc_user, rpc_password); let rpc_client = Arc::new(RpcClient::new(&rpc_credentials, http_endpoint)); @@ -46,15 +54,60 @@ impl BitcoindRpcClient { let mempool_entries_cache = tokio::sync::Mutex::new(HashMap::new()); let mempool_txs_cache = tokio::sync::Mutex::new(HashMap::new()); - Self { rpc_client, latest_mempool_timestamp, mempool_entries_cache, mempool_txs_cache } + Self::Rpc { rpc_client, latest_mempool_timestamp, mempool_entries_cache, mempool_txs_cache } } - pub(crate) fn rpc_client(&self) -> Arc { - Arc::clone(&self.rpc_client) + /// Creates a new, primarily REST API client for the chain interactions + /// with Bitcoin Core. + /// + /// Aside the required REST host and port, we provide RPC configuration + /// options for necessary calls not supported by the REST interface. 
+ pub(crate) fn new_rest( + rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, rpc_user: String, + rpc_password: String, + ) -> Self { + let rest_endpoint = endpoint(rest_host, rest_port).with_path("/rest".to_string()); + let rest_client = Arc::new(RestClient::new(rest_endpoint)); + + let rpc_endpoint = endpoint(rpc_host, rpc_port); + let rpc_credentials = rpc_credentials(rpc_user, rpc_password); + let rpc_client = Arc::new(RpcClient::new(&rpc_credentials, rpc_endpoint)); + + let latest_mempool_timestamp = AtomicU64::new(0); + + let mempool_entries_cache = tokio::sync::Mutex::new(HashMap::new()); + let mempool_txs_cache = tokio::sync::Mutex::new(HashMap::new()); + + Self::Rest { + rest_client, + rpc_client, + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + } } + pub(crate) fn utxo_source(&self) -> Arc { + match self { + BitcoindClient::Rpc { rpc_client, .. } => Arc::clone(rpc_client) as Arc, + BitcoindClient::Rest { rest_client, .. } => { + Arc::clone(rest_client) as Arc + }, + } + } + + /// Broadcasts the provided transaction. pub(crate) async fn broadcast_transaction(&self, tx: &Transaction) -> std::io::Result { - Self::broadcast_transaction_inner(self.rpc_client(), tx).await + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::broadcast_transaction_inner(Arc::clone(rpc_client), tx).await + }, + BitcoindClient::Rest { rpc_client, .. } => { + // Bitcoin Core's REST interface does not support broadcasting transactions + // so we use the RPC client. + Self::broadcast_transaction_inner(Arc::clone(rpc_client), tx).await + }, + } } async fn broadcast_transaction_inner( @@ -65,11 +118,31 @@ impl BitcoindRpcClient { rpc_client.call_method::("sendrawtransaction", &[tx_json]).await } + /// Retrieve the fee estimate needed for a transaction to begin + /// confirmation within the provided `num_blocks`. 
pub(crate) async fn get_fee_estimate_for_target( &self, num_blocks: usize, estimation_mode: FeeRateEstimationMode, ) -> std::io::Result { - Self::get_fee_estimate_for_target_inner(self.rpc_client(), num_blocks, estimation_mode) - .await + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_fee_estimate_for_target_inner( + Arc::clone(rpc_client), + num_blocks, + estimation_mode, + ) + .await + }, + BitcoindClient::Rest { rpc_client, .. } => { + // We rely on the internal RPC client to make this call, as this + // operation is not supported by Bitcoin Core's REST interface. + Self::get_fee_estimate_for_target_inner( + Arc::clone(rpc_client), + num_blocks, + estimation_mode, + ) + .await + }, + } } /// Estimate the fee rate for the provided target number of blocks. @@ -87,11 +160,19 @@ impl BitcoindRpcClient { .map(|resp| resp.0) } + /// Gets the mempool minimum fee rate. pub(crate) async fn get_mempool_minimum_fee_rate(&self) -> std::io::Result { - Self::get_mempool_minimum_fee_rate_rpc(self.rpc_client()).await + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_mempool_minimum_fee_rate_rpc(Arc::clone(rpc_client)).await + }, + BitcoindClient::Rest { rest_client, .. } => { + Self::get_mempool_minimum_fee_rate_rest(Arc::clone(rest_client)).await + }, + } } - /// Get the minimum mempool fee rate via RPC interface. + /// Get the mempool minimum fee rate via RPC interface. async fn get_mempool_minimum_fee_rate_rpc( rpc_client: Arc, ) -> std::io::Result { @@ -101,10 +182,28 @@ impl BitcoindRpcClient { .map(|resp| resp.0) } + /// Get the mempool minimum fee rate via REST interface. + async fn get_mempool_minimum_fee_rate_rest( + rest_client: Arc, + ) -> std::io::Result { + rest_client + .request_resource::("mempool/info.json") + .await + .map(|resp| resp.0) + } + + /// Gets the raw transaction for the provided transaction ID. Returns `None` if not found. 
pub(crate) async fn get_raw_transaction( &self, txid: &Txid, ) -> std::io::Result> { - Self::get_raw_transaction_rpc(self.rpc_client(), txid).await + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_raw_transaction_rpc(Arc::clone(rpc_client), txid).await + }, + BitcoindClient::Rest { rest_client, .. } => { + Self::get_raw_transaction_rest(Arc::clone(rest_client), txid).await + }, + } } /// Retrieve raw transaction for provided transaction ID via the RPC interface. @@ -145,8 +244,68 @@ impl BitcoindRpcClient { } } + /// Retrieve raw transaction for provided transaction ID via the REST interface. + async fn get_raw_transaction_rest( + rest_client: Arc, txid: &Txid, + ) -> std::io::Result> { + let txid_hex = bitcoin::consensus::encode::serialize_hex(txid); + let tx_path = format!("tx/{}.json", txid_hex); + match rest_client + .request_resource::(&tx_path) + .await + { + Ok(resp) => Ok(Some(resp.0)), + Err(e) => match e.kind() { + std::io::ErrorKind::Other => { + match e.into_inner() { + Some(inner) => { + let http_error_res: Result, _> = inner.downcast(); + match http_error_res { + Ok(http_error) => { + // Check if it's the HTTP NOT_FOUND error code. + if &http_error.status_code == "404" { + Ok(None) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::Other, + http_error, + )) + } + }, + Err(_) => { + let error_msg = + format!("Failed to process {} response.", tx_path); + Err(std::io::Error::new( + std::io::ErrorKind::Other, + error_msg.as_str(), + )) + }, + } + }, + None => { + let error_msg = format!("Failed to process {} response.", tx_path); + Err(std::io::Error::new(std::io::ErrorKind::Other, error_msg.as_str())) + }, + } + }, + _ => { + let error_msg = format!("Failed to process {} response.", tx_path); + Err(std::io::Error::new(std::io::ErrorKind::Other, error_msg.as_str())) + }, + }, + } + } + + /// Retrieves the raw mempool. 
pub(crate) async fn get_raw_mempool(&self) -> std::io::Result> { - Self::get_raw_mempool_rpc(self.rpc_client()).await + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_raw_mempool_rpc(Arc::clone(rpc_client)).await + }, + BitcoindClient::Rest { rest_client, .. } => { + Self::get_raw_mempool_rest(Arc::clone(rest_client)).await + }, + } } /// Retrieves the raw mempool via the RPC interface. @@ -158,10 +317,28 @@ impl BitcoindRpcClient { .map(|resp| resp.0) } + /// Retrieves the raw mempool via the REST interface. + async fn get_raw_mempool_rest(rest_client: Arc) -> std::io::Result> { + rest_client + .request_resource::( + "mempool/contents.json?verbose=false", + ) + .await + .map(|resp| resp.0) + } + + /// Retrieves an entry from the mempool if it exists, else return `None`. pub(crate) async fn get_mempool_entry( &self, txid: Txid, ) -> std::io::Result> { - Self::get_mempool_entry_inner(self.rpc_client(), txid).await + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_mempool_entry_inner(Arc::clone(rpc_client), txid).await + }, + BitcoindClient::Rest { rpc_client, .. } => { + Self::get_mempool_entry_inner(Arc::clone(rpc_client), txid).await + }, + } } /// Retrieves the mempool entry of the provided transaction ID. @@ -201,7 +378,14 @@ impl BitcoindRpcClient { } pub(crate) async fn update_mempool_entries_cache(&self) -> std::io::Result<()> { - self.update_mempool_entries_cache_inner(&self.mempool_entries_cache).await + match self { + BitcoindClient::Rpc { mempool_entries_cache, .. } => { + self.update_mempool_entries_cache_inner(mempool_entries_cache).await + }, + BitcoindClient::Rest { mempool_entries_cache, .. } => { + self.update_mempool_entries_cache_inner(mempool_entries_cache).await + }, + } } async fn update_mempool_entries_cache_inner( @@ -249,16 +433,39 @@ impl BitcoindRpcClient { /// This method is an adapted version of `bdk_bitcoind_rpc::Emitter::mempool`. 
It emits each /// transaction only once, unless we cannot assume the transaction's ancestors are already /// emitted. - async fn get_mempool_transactions_and_timestamp_at_height( + pub(crate) async fn get_mempool_transactions_and_timestamp_at_height( &self, best_processed_height: u32, ) -> std::io::Result> { - self.get_mempool_transactions_and_timestamp_at_height_inner( - &self.latest_mempool_timestamp, - &self.mempool_entries_cache, - &self.mempool_txs_cache, - best_processed_height, - ) - .await + match self { + BitcoindClient::Rpc { + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + .. + } => { + self.get_mempool_transactions_and_timestamp_at_height_inner( + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + best_processed_height, + ) + .await + }, + BitcoindClient::Rest { + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + .. + } => { + self.get_mempool_transactions_and_timestamp_at_height_inner( + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + best_processed_height, + ) + .await + }, + } } async fn get_mempool_transactions_and_timestamp_at_height_inner( @@ -329,16 +536,28 @@ impl BitcoindRpcClient { async fn get_evicted_mempool_txids_and_timestamp( &self, unconfirmed_txids: Vec, ) -> std::io::Result> { - self.get_evicted_mempool_txids_and_timestamp_inner( - &self.latest_mempool_timestamp, - &self.mempool_entries_cache, - unconfirmed_txids, - ) - .await + match self { + BitcoindClient::Rpc { latest_mempool_timestamp, mempool_entries_cache, .. } => { + Self::get_evicted_mempool_txids_and_timestamp_inner( + latest_mempool_timestamp, + mempool_entries_cache, + unconfirmed_txids, + ) + .await + }, + BitcoindClient::Rest { latest_mempool_timestamp, mempool_entries_cache, .. 
} => { + Self::get_evicted_mempool_txids_and_timestamp_inner( + latest_mempool_timestamp, + mempool_entries_cache, + unconfirmed_txids, + ) + .await + }, + } } async fn get_evicted_mempool_txids_and_timestamp_inner( - &self, latest_mempool_timestamp: &AtomicU64, + latest_mempool_timestamp: &AtomicU64, mempool_entries_cache: &tokio::sync::Mutex>, unconfirmed_txids: Vec, ) -> std::io::Result> { @@ -353,21 +572,42 @@ impl BitcoindRpcClient { } } -impl BlockSource for BitcoindRpcClient { +impl BlockSource for BitcoindClient { fn get_header<'a>( - &'a self, header_hash: &'a BlockHash, height_hint: Option, + &'a self, header_hash: &'a bitcoin::BlockHash, height_hint: Option, ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { self.rpc_client.get_header(header_hash, height_hint).await }) + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Box::pin(async move { rpc_client.get_header(header_hash, height_hint).await }) + }, + BitcoindClient::Rest { rest_client, .. } => { + Box::pin(async move { rest_client.get_header(header_hash, height_hint).await }) + }, + } } fn get_block<'a>( - &'a self, header_hash: &'a BlockHash, + &'a self, header_hash: &'a bitcoin::BlockHash, ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { self.rpc_client.get_block(header_hash).await }) + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Box::pin(async move { rpc_client.get_block(header_hash).await }) + }, + BitcoindClient::Rest { rest_client, .. } => { + Box::pin(async move { rest_client.get_block(header_hash).await }) + }, + } } - fn get_best_block(&self) -> AsyncBlockSourceResult<(BlockHash, Option)> { - Box::pin(async move { self.rpc_client.get_best_block().await }) + fn get_best_block(&self) -> AsyncBlockSourceResult<(bitcoin::BlockHash, Option)> { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Box::pin(async move { rpc_client.get_best_block().await }) + }, + BitcoindClient::Rest { rest_client, .. 
} => { + Box::pin(async move { rest_client.get_best_block().await }) + }, + } } } @@ -395,7 +635,7 @@ impl TryInto for JsonResponse { } } -pub struct MempoolMinFeeResponse(pub FeeRate); +pub(crate) struct MempoolMinFeeResponse(pub FeeRate); impl TryInto for JsonResponse { type Error = std::io::Error; @@ -413,7 +653,7 @@ impl TryInto for JsonResponse { } } -pub struct GetRawTransactionResponse(pub Transaction); +pub(crate) struct GetRawTransactionResponse(pub Transaction); impl TryInto for JsonResponse { type Error = std::io::Error; @@ -600,3 +840,26 @@ impl Listen for ChainListener { self.output_sweeper.block_disconnected(header, height); } } + +pub(crate) fn rpc_credentials(rpc_user: String, rpc_password: String) -> String { + BASE64_STANDARD.encode(format!("{}:{}", rpc_user, rpc_password)) +} + +pub(crate) fn endpoint(host: String, port: u16) -> HttpEndpoint { + HttpEndpoint::for_host(host).with_port(port) +} + +#[derive(Debug)] +pub struct HttpError { + pub(crate) status_code: String, + pub(crate) contents: Vec, +} + +impl std::error::Error for HttpError {} + +impl std::fmt::Display for HttpError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let contents = String::from_utf8_lossy(&self.contents); + write!(f, "status_code: {}, contents: {}", self.status_code, contents) + } +} diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 4d91ffe86..c3d5fdedc 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -9,14 +9,15 @@ mod bitcoind; mod electrum; use crate::chain::bitcoind::{ - BitcoindRpcClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, + BitcoindClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, }; use crate::chain::electrum::ElectrumRuntimeClient; use crate::config::{ - BackgroundSyncConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, - BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, - LDK_WALLET_SYNC_TIMEOUT_SECS, 
RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, - TX_BROADCAST_TIMEOUT_SECS, WALLET_SYNC_INTERVAL_MINIMUM_SECS, + BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, + BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, TX_BROADCAST_TIMEOUT_SECS, + WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, @@ -215,8 +216,8 @@ pub(crate) enum ChainSource { logger: Arc, node_metrics: Arc>, }, - BitcoindRpc { - bitcoind_rpc_client: Arc, + Bitcoind { + api_client: Arc, header_cache: tokio::sync::Mutex, latest_chain_tip: RwLock>, onchain_wallet: Arc, @@ -293,18 +294,23 @@ impl ChainSource { } pub(crate) fn new_bitcoind_rpc( - host: String, port: u16, rpc_user: String, rpc_password: String, + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, onchain_wallet: Arc, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, ) -> Self { - let bitcoind_rpc_client = - Arc::new(BitcoindRpcClient::new(host, port, rpc_user, rpc_password)); + let api_client = Arc::new(BitcoindClient::new_rpc( + rpc_host.clone(), + rpc_port.clone(), + rpc_user.clone(), + rpc_password.clone(), + )); + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); let latest_chain_tip = RwLock::new(None); let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - Self::BitcoindRpc { - bitcoind_rpc_client, + Self::Bitcoind { + api_client, header_cache, latest_chain_tip, onchain_wallet, @@ -318,6 +324,41 @@ impl ChainSource { } } + pub(crate) fn new_bitcoind_rest( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + 
rest_client_config: BitcoindRestClientConfig, logger: Arc, + node_metrics: Arc>, + ) -> Self { + let api_client = Arc::new(BitcoindClient::new_rest( + rest_client_config.rest_host, + rest_client_config.rest_port, + rpc_host, + rpc_port, + rpc_user, + rpc_password, + )); + + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + + Self::Bitcoind { + api_client, + header_cache, + latest_chain_tip, + wallet_polling_status, + onchain_wallet, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger, + node_metrics, + } + } + pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { match self { Self::Electrum { server_url, electrum_runtime_status, config, logger, .. } => { @@ -348,7 +389,7 @@ impl ChainSource { pub(crate) fn as_utxo_source(&self) -> Option> { match self { - Self::BitcoindRpc { bitcoind_rpc_client, .. } => Some(bitcoind_rpc_client.rpc_client()), + Self::Bitcoind { api_client, .. } => Some(api_client.utxo_source()), _ => None, } } @@ -399,8 +440,8 @@ impl ChainSource { return; } }, - Self::BitcoindRpc { - bitcoind_rpc_client, + Self::Bitcoind { + api_client, header_cache, latest_chain_tip, onchain_wallet, @@ -469,7 +510,7 @@ impl ChainSource { let mut locked_header_cache = header_cache.lock().await; let now = SystemTime::now(); match synchronize_listeners( - bitcoind_rpc_client.as_ref(), + api_client.as_ref(), config.network, &mut *locked_header_cache, chain_listeners.clone(), @@ -836,8 +877,8 @@ impl ChainSource { res }, - Self::BitcoindRpc { .. } => { - // In BitcoindRpc mode we sync lightning and onchain wallet in one go by via + Self::Bitcoind { .. } => { + // In BitcoindRpc mode we sync lightning and onchain wallet in one go via // `ChainPoller`. So nothing to do here. 
unreachable!("Onchain wallet will be synced via chain polling") }, @@ -1006,8 +1047,8 @@ impl ChainSource { res }, - Self::BitcoindRpc { .. } => { - // In BitcoindRpc mode we sync lightning and onchain wallet in one go by via + Self::Bitcoind { .. } => { + // In BitcoindRpc mode we sync lightning and onchain wallet in one go via // `ChainPoller`. So nothing to do here. unreachable!("Lightning wallet will be synced via chain polling") }, @@ -1029,8 +1070,8 @@ impl ChainSource { // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here. unreachable!("Listeners will be synced via transction-based syncing") }, - Self::BitcoindRpc { - bitcoind_rpc_client, + Self::Bitcoind { + api_client, header_cache, latest_chain_tip, onchain_wallet, @@ -1059,7 +1100,7 @@ impl ChainSource { let chain_tip = if let Some(tip) = latest_chain_tip_opt { tip } else { - match validate_best_block_header(bitcoind_rpc_client.as_ref()).await { + match validate_best_block_header(api_client.as_ref()).await { Ok(tip) => { *latest_chain_tip.write().unwrap() = Some(tip); tip @@ -1077,8 +1118,7 @@ impl ChainSource { }; let mut locked_header_cache = header_cache.lock().await; - let chain_poller = - ChainPoller::new(Arc::clone(&bitcoind_rpc_client), config.network); + let chain_poller = ChainPoller::new(Arc::clone(&api_client), config.network); let chain_listener = ChainListener { onchain_wallet: Arc::clone(&onchain_wallet), channel_manager: Arc::clone(&channel_manager), @@ -1115,7 +1155,7 @@ impl ChainSource { let now = SystemTime::now(); let unconfirmed_txids = onchain_wallet.get_unconfirmed_txids(); - match bitcoind_rpc_client + match api_client .get_updated_mempool_transactions(cur_height, unconfirmed_txids) .await { @@ -1300,8 +1340,8 @@ impl ChainSource { Ok(()) }, - Self::BitcoindRpc { - bitcoind_rpc_client, + Self::Bitcoind { + api_client, fee_estimator, config, kv_store, @@ -1332,7 +1372,7 @@ impl ChainSource { ConfirmationTarget::Lightning( 
LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, ) => { - let estimation_fut = bitcoind_rpc_client.get_mempool_minimum_fee_rate(); + let estimation_fut = api_client.get_mempool_minimum_fee_rate(); get_fee_rate_update!(estimation_fut) }, ConfirmationTarget::Lightning( @@ -1340,8 +1380,8 @@ impl ChainSource { ) => { let num_blocks = get_num_block_defaults_for_target(target); let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = bitcoind_rpc_client - .get_fee_estimate_for_target(num_blocks, estimation_mode); + let estimation_fut = + api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); get_fee_rate_update!(estimation_fut) }, ConfirmationTarget::Lightning( @@ -1349,16 +1389,16 @@ impl ChainSource { ) => { let num_blocks = get_num_block_defaults_for_target(target); let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = bitcoind_rpc_client - .get_fee_estimate_for_target(num_blocks, estimation_mode); + let estimation_fut = + api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); get_fee_rate_update!(estimation_fut) }, _ => { // Otherwise, we default to economical block-target estimate. let num_blocks = get_num_block_defaults_for_target(target); let estimation_mode = FeeRateEstimationMode::Economical; - let estimation_fut = bitcoind_rpc_client - .get_fee_estimate_for_target(num_blocks, estimation_mode); + let estimation_fut = + api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); get_fee_rate_update!(estimation_fut) }, }; @@ -1530,7 +1570,7 @@ impl ChainSource { } } }, - Self::BitcoindRpc { bitcoind_rpc_client, tx_broadcaster, logger, .. } => { + Self::Bitcoind { api_client, tx_broadcaster, logger, .. 
} => { // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 // features, we should eventually switch to use `submitpackage` via the // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual @@ -1541,7 +1581,7 @@ impl ChainSource { let txid = tx.compute_txid(); let timeout_fut = tokio::time::timeout( Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - bitcoind_rpc_client.broadcast_transaction(tx), + api_client.broadcast_transaction(tx), ); match timeout_fut.await { Ok(res) => match res { @@ -1595,7 +1635,7 @@ impl Filter for ChainSource { Self::Electrum { electrum_runtime_status, .. } => { electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) }, - Self::BitcoindRpc { .. } => (), + Self::Bitcoind { .. } => (), } } fn register_output(&self, output: lightning::chain::WatchedOutput) { @@ -1604,7 +1644,7 @@ impl Filter for ChainSource { Self::Electrum { electrum_runtime_status, .. } => { electrum_runtime_status.write().unwrap().register_output(output) }, - Self::BitcoindRpc { .. } => (), + Self::Bitcoind { .. } => (), } } } diff --git a/src/config.rs b/src/config.rs index 4a39c1b56..a2930ea5a 100644 --- a/src/config.rs +++ b/src/config.rs @@ -397,6 +397,15 @@ impl Default for ElectrumSyncConfig { } } +/// Configuration for syncing with Bitcoin Core backend via REST. +#[derive(Debug, Clone)] +pub struct BitcoindRestClientConfig { + /// Host URL. + pub rest_host: String, + /// Host port. + pub rest_port: u16, +} + /// Options which apply on a per-channel basis and may change at runtime or based on negotiation /// with our counterparty. #[derive(Copy, Clone, Debug, PartialEq, Eq)] diff --git a/src/lib.rs b/src/lib.rs index b09f9a9f7..a75da763a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1277,7 +1277,7 @@ impl Node { .await?; chain_source.sync_onchain_wallet().await?; }, - ChainSource::BitcoindRpc { .. } => { + ChainSource::Bitcoind { .. 
} => { chain_source.update_fee_rate_estimates().await?; chain_source .poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3258df791..daed86475 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -174,6 +174,7 @@ pub(crate) fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) { ); let mut bitcoind_conf = corepc_node::Conf::default(); bitcoind_conf.network = "regtest"; + bitcoind_conf.args.push("-rest"); let bitcoind = BitcoinD::with_conf(bitcoind_exe, &bitcoind_conf).unwrap(); let electrs_exe = env::var("ELECTRS_EXE") @@ -256,7 +257,8 @@ type TestNode = Node; pub(crate) enum TestChainSource<'a> { Esplora(&'a ElectrsD), Electrum(&'a ElectrsD), - BitcoindRpc(&'a BitcoinD), + BitcoindRpcSync(&'a BitcoinD), + BitcoindRestSync(&'a BitcoinD), } #[derive(Clone, Default)] @@ -317,7 +319,7 @@ pub(crate) fn setup_node( let sync_config = ElectrumSyncConfig { background_sync_config: None }; builder.set_chain_source_electrum(electrum_url.clone(), Some(sync_config)); }, - TestChainSource::BitcoindRpc(bitcoind) => { + TestChainSource::BitcoindRpcSync(bitcoind) => { let rpc_host = bitcoind.params.rpc_socket.ip().to_string(); let rpc_port = bitcoind.params.rpc_socket.port(); let values = bitcoind.params.get_cookie_values().unwrap().unwrap(); @@ -325,6 +327,23 @@ pub(crate) fn setup_node( let rpc_password = values.password; builder.set_chain_source_bitcoind_rpc(rpc_host, rpc_port, rpc_user, rpc_password); }, + TestChainSource::BitcoindRestSync(bitcoind) => { + let rpc_host = bitcoind.params.rpc_socket.ip().to_string(); + let rpc_port = bitcoind.params.rpc_socket.port(); + let values = bitcoind.params.get_cookie_values().unwrap().unwrap(); + let rpc_user = values.user; + let rpc_password = values.password; + let rest_host = bitcoind.params.rpc_socket.ip().to_string(); + let rest_port = bitcoind.params.rpc_socket.port(); + builder.set_chain_source_bitcoind_rest( + rest_host, + rest_port, + rpc_host, + 
rpc_port, + rpc_user, + rpc_password, + ); + }, } match &config.log_writer { diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index db48eca23..fbd95ef50 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -56,9 +56,17 @@ fn channel_full_cycle_electrum() { } #[test] -fn channel_full_cycle_bitcoind() { +fn channel_full_cycle_bitcoind_rpc_sync() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::BitcoindRpc(&bitcoind); + let chain_source = TestChainSource::BitcoindRpcSync(&bitcoind); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); +} + +#[test] +fn channel_full_cycle_bitcoind_rest_sync() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::BitcoindRestSync(&bitcoind); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); } From ed96316bc7b20b27d395f2e18270d047962f4cf5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 1 Jul 2025 13:29:49 +0200 Subject: [PATCH 021/184] Bump `uniffi` dependency to v0.28.3 We bump our `uniffi` dependency to v0.28.3 to unlock some of the nicer features `uniffi` added since the previously-used v0.27.3. However, we can't bump it further to v0.29.3, as we have users requiring compatibility with `uniffi-bindgen-go`, which only supports v0.28.3 at the time of writing. 
--- Cargo.toml | 4 ++-- bindings/uniffi-bindgen/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index bf8bed08c..7fc1123e1 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,7 +84,7 @@ esplora-client = { version = "0.12", default-features = false, features = ["toki esplora-client_0_11 = { package = "esplora-client", version = "0.11", default-features = false, features = ["tokio", "async-https-rustls"] } electrum-client = { version = "0.23.1", default-features = true } libc = "0.2" -uniffi = { version = "0.27.3", features = ["build"], optional = true } +uniffi = { version = "0.28.3", features = ["build"], optional = true } serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] } serde_json = { version = "1.0.128", default-features = false, features = ["std"] } log = { version = "0.4.22", default-features = false, features = ["std"]} @@ -117,7 +117,7 @@ lnd_grpc_rust = { version = "2.10.0", default-features = false } tokio = { version = "1.37", features = ["fs"] } [build-dependencies] -uniffi = { version = "0.27.3", features = ["build"], optional = true } +uniffi = { version = "0.28.3", features = ["build"], optional = true } [profile.release] panic = "abort" diff --git a/bindings/uniffi-bindgen/Cargo.toml b/bindings/uniffi-bindgen/Cargo.toml index 9a4c9d5da..a33c0f9ae 100644 --- a/bindings/uniffi-bindgen/Cargo.toml +++ b/bindings/uniffi-bindgen/Cargo.toml @@ -6,4 +6,4 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -uniffi = { version = "0.27.3", features = ["cli"] } +uniffi = { version = "0.28.3", features = ["cli"] } From 6fe2d30c76107f4968b712ac809cfaae38736ed0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 13 May 2025 14:04:46 +0200 Subject: [PATCH 022/184] Reduce syncing and shutdown timeouts considerably Previously, we had to configure enormous syncing timeouts as the BDK wallet syncing 
would hold a central mutex that could lead to large parts of event handling and syncing locking up. Here, we drop the configured timeouts considerably across the board, since such huge values are hopefully not required anymore. --- src/chain/electrum.rs | 2 +- src/config.rs | 7 +++++-- src/lib.rs | 10 ++++------ 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 6e62d9c08..9882e652b 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -40,7 +40,7 @@ use std::time::{Duration, Instant}; const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5; const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3; -const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 20; +const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10; pub(crate) struct ElectrumRuntimeClient { electrum_client: Arc, diff --git a/src/config.rs b/src/config.rs index a2930ea5a..7b7ed8156 100644 --- a/src/config.rs +++ b/src/config.rs @@ -65,10 +65,13 @@ pub(crate) const NODE_ANN_BCAST_INTERVAL: Duration = Duration::from_secs(60 * 60 pub(crate) const WALLET_SYNC_INTERVAL_MINIMUM_SECS: u64 = 10; // The timeout after which we abort a wallet syncing operation. -pub(crate) const BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 90; +pub(crate) const BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 20; // The timeout after which we abort a wallet syncing operation. -pub(crate) const LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 30; +pub(crate) const LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 10; + +// The timeout after which we give up waiting on LDK's event handler to exit on shutdown. +pub(crate) const LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS: u64 = 30; // The timeout after which we abort a fee rate cache update operation. 
pub(crate) const FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 5; diff --git a/src/lib.rs b/src/lib.rs index a75da763a..8579c29fc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -126,8 +126,9 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, ChannelConfig, Config, NODE_ANN_BCAST_INTERVAL, - PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, + default_user_config, may_announce_channel, ChannelConfig, Config, + LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, + RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; @@ -673,13 +674,10 @@ impl Node { let event_handling_stopped_logger = Arc::clone(&self.logger); let mut event_handling_stopped_receiver = self.event_handling_stopped_sender.subscribe(); - // FIXME: For now, we wait up to 100 secs (BDK_WALLET_SYNC_TIMEOUT_SECS + 10) to allow - // event handling to exit gracefully even if it was blocked on the BDK wallet syncing. We - // should drop this considerably post upgrading to BDK 1.0. let timeout_res = tokio::task::block_in_place(move || { runtime.block_on(async { tokio::time::timeout( - Duration::from_secs(100), + Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS), event_handling_stopped_receiver.changed(), ) .await From 9eae61dc0a4b0f14a728d2019cd10d20c9b35a2a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Jul 2025 11:37:52 +0200 Subject: [PATCH 023/184] Await on the background processing task's `JoinHandle` Previously, we used to a channel to indicate that the background processor task has been stopped. Here, we rather just await the task's `JoinHandle` which is more robust in that it avoids a race condition. 
--- src/builder.rs | 4 +-- src/lib.rs | 80 +++++++++++++++++++++++--------------------------- 2 files changed, 38 insertions(+), 46 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index a177768f6..66b160e31 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1591,12 +1591,12 @@ fn build_with_store_internal( }; let (stop_sender, _) = tokio::sync::watch::channel(()); - let (event_handling_stopped_sender, _) = tokio::sync::watch::channel(()); + let background_processor_task = Mutex::new(None); Ok(Node { runtime, stop_sender, - event_handling_stopped_sender, + background_processor_task, config, wallet, chain_source, diff --git a/src/lib.rs b/src/lib.rs index 8579c29fc..e0f8ff236 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -180,7 +180,7 @@ uniffi::include_scaffolding!("ldk_node"); pub struct Node { runtime: Arc>>>, stop_sender: tokio::sync::watch::Sender<()>, - event_handling_stopped_sender: tokio::sync::watch::Sender<()>, + background_processor_task: Mutex>>, config: Arc, wallet: Arc, chain_source: Arc, @@ -579,8 +579,7 @@ impl Node { }; let background_stop_logger = Arc::clone(&self.logger); - let event_handling_stopped_sender = self.event_handling_stopped_sender.clone(); - runtime.spawn(async move { + let handle = runtime.spawn(async move { process_events_async( background_persister, |e| background_event_handler.handle_event(e), @@ -601,19 +600,9 @@ impl Node { panic!("Failed to process events"); }); log_debug!(background_stop_logger, "Events processing stopped.",); - - match event_handling_stopped_sender.send(()) { - Ok(_) => (), - Err(e) => { - log_error!( - background_stop_logger, - "Failed to send 'events handling stopped' signal. 
This should never happen: {}", - e - ); - debug_assert!(false); - }, - } }); + debug_assert!(self.background_processor_task.lock().unwrap().is_none()); + *self.background_processor_task.lock().unwrap() = Some(handle); if let Some(liquidity_source) = self.liquidity_source.as_ref() { let mut stop_liquidity_handler = self.stop_sender.subscribe(); @@ -670,39 +659,42 @@ impl Node { // Disconnect all peers. self.peer_manager.disconnect_all_peers(); - // Wait until event handling stopped, at least until a timeout is reached. - let event_handling_stopped_logger = Arc::clone(&self.logger); - let mut event_handling_stopped_receiver = self.event_handling_stopped_sender.subscribe(); + // Stop any runtime-dependant chain sources. + self.chain_source.stop(); - let timeout_res = tokio::task::block_in_place(move || { - runtime.block_on(async { - tokio::time::timeout( - Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS), - event_handling_stopped_receiver.changed(), - ) - .await - }) - }); + // Wait until background processing stopped, at least until a timeout is reached. + if let Some(background_processor_task) = + self.background_processor_task.lock().unwrap().take() + { + let abort_handle = background_processor_task.abort_handle(); + let timeout_res = tokio::task::block_in_place(move || { + runtime.block_on(async { + tokio::time::timeout( + Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS), + background_processor_task, + ) + .await + }) + }); - match timeout_res { - Ok(stop_res) => match stop_res { - Ok(()) => {}, + match timeout_res { + Ok(stop_res) => match stop_res { + Ok(()) => {}, + Err(e) => { + abort_handle.abort(); + log_error!( + self.logger, + "Stopping event handling failed. This should never happen: {}", + e + ); + panic!("Stopping event handling failed. This should never happen."); + }, + }, Err(e) => { - log_error!( - event_handling_stopped_logger, - "Stopping event handling failed. 
This should never happen: {}", - e - ); - panic!("Stopping event handling failed. This should never happen."); + abort_handle.abort(); + log_error!(self.logger, "Stopping event handling timed out: {}", e); }, - }, - Err(e) => { - log_error!( - event_handling_stopped_logger, - "Stopping event handling timed out: {}", - e - ); - }, + } } #[cfg(tokio_unstable)] From c3d6161b7e46ef20db6530827aeef3a21c57703a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 7 Jul 2025 13:28:03 +0200 Subject: [PATCH 024/184] Improve logging in `stop` .. we provide finer-grained logging after each step of `stop`. --- src/lib.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index e0f8ff236..0a53fbbb3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -147,9 +147,7 @@ use types::{ }; pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, UserChannelId}; -#[cfg(tokio_unstable)] -use logger::log_trace; -use logger::{log_debug, log_error, log_info, LdkLogger, Logger}; +use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use lightning::chain::BestBlock; use lightning::events::bump_transaction::Wallet as LdkWallet; @@ -578,7 +576,6 @@ impl Node { }) }; - let background_stop_logger = Arc::clone(&self.logger); let handle = runtime.spawn(async move { process_events_async( background_persister, @@ -599,7 +596,6 @@ impl Node { log_error!(background_error_logger, "Failed to process events: {}", e); panic!("Failed to process events"); }); - log_debug!(background_stop_logger, "Events processing stopped.",); }); debug_assert!(self.background_processor_task.lock().unwrap().is_none()); *self.background_processor_task.lock().unwrap() = Some(handle); @@ -645,7 +641,7 @@ impl Node { // Stop the runtime. match self.stop_sender.send(()) { - Ok(_) => (), + Ok(_) => log_trace!(self.logger, "Sent shutdown signal to background tasks."), Err(e) => { log_error!( self.logger, @@ -658,9 +654,11 @@ impl Node { // Disconnect all peers. 
self.peer_manager.disconnect_all_peers(); + log_debug!(self.logger, "Disconnected all network peers."); // Stop any runtime-dependant chain sources. self.chain_source.stop(); + log_debug!(self.logger, "Stopped chain sources."); // Wait until background processing stopped, at least until a timeout is reached. if let Some(background_processor_task) = @@ -679,7 +677,7 @@ impl Node { match timeout_res { Ok(stop_res) => match stop_res { - Ok(()) => {}, + Ok(()) => log_debug!(self.logger, "Stopped background processing of events."), Err(e) => { abort_handle.abort(); log_error!( From 8c157c3b9f80d02d0918933c2fa141bcdc4117e7 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Thu, 3 Jul 2025 14:46:34 +0100 Subject: [PATCH 025/184] ci(vss): use latest gradle version update to Gradle 9.0 and auto-extract version number. --- .github/workflows/vss-integration.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index 2a6c63704..f7a230780 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -62,9 +62,16 @@ jobs: # Print Info java -version gradle --version + + GRADLE_VERSION=$(gradle --version | awk '/^Gradle/ {print $2}' | head -1) + if [ -z "$GRADLE_VERSION" ]; then + echo "Error: Failed to extract Gradle version." 
>&2 + exit 1 + fi + echo "Extracted Gradle Version: $GRADLE_VERSION" cd vss-server/java - gradle wrapper --gradle-version 8.1.1 + gradle wrapper --gradle-version $GRADLE_VERSION ./gradlew --version ./gradlew build From d5df3d0c739e1022006aef8c52a4562aa5fc6852 Mon Sep 17 00:00:00 2001 From: benthecarman Date: Fri, 18 Jul 2025 13:34:47 -0500 Subject: [PATCH 026/184] Add option to include headers in Esplora config This is useful if the esplora server has a form of authentication in front of it --- src/builder.rs | 44 ++++++++++++++++++++++++++++++++++++++++++-- src/chain/mod.rs | 18 ++++++++++++++---- 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 66b160e31..30a1649d2 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -87,6 +87,7 @@ const LSPS_HARDENED_CHILD_INDEX: u32 = 577; enum ChainDataSourceConfig { Esplora { server_url: String, + headers: HashMap, sync_config: Option, }, Electrum { @@ -294,9 +295,28 @@ impl NodeBuilder { /// information. pub fn set_chain_source_esplora( &mut self, server_url: String, sync_config: Option, + ) -> &mut Self { + self.chain_data_source_config = Some(ChainDataSourceConfig::Esplora { + server_url, + headers: Default::default(), + sync_config, + }); + self + } + + /// Configures the [`Node`] instance to source its chain data from the given Esplora server. + /// + /// The given `headers` will be included in all requests to the Esplora server, typically used for + /// authentication purposes. + /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. 
+ pub fn set_chain_source_esplora_with_headers( + &mut self, server_url: String, headers: HashMap, + sync_config: Option, ) -> &mut Self { self.chain_data_source_config = - Some(ChainDataSourceConfig::Esplora { server_url, sync_config }); + Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }); self } @@ -754,6 +774,24 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config); } + /// Configures the [`Node`] instance to source its chain data from the given Esplora server. + /// + /// The given `headers` will be included in all requests to the Esplora server, typically used for + /// authentication purposes. + /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. + pub fn set_chain_source_esplora_with_headers( + &self, server_url: String, headers: HashMap, + sync_config: Option, + ) { + self.inner.write().unwrap().set_chain_source_esplora_with_headers( + server_url, + headers, + sync_config, + ); + } + /// Configures the [`Node`] instance to source its chain data from the given Electrum server. /// /// If no `sync_config` is given, default values are used. 
See [`ElectrumSyncConfig`] for more @@ -1117,10 +1155,11 @@ fn build_with_store_internal( )); let chain_source = match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => { + Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); Arc::new(ChainSource::new_esplora( server_url.clone(), + headers.clone(), sync_config, Arc::clone(&wallet), Arc::clone(&fee_estimator), @@ -1187,6 +1226,7 @@ fn build_with_store_internal( let sync_config = EsploraSyncConfig::default(); Arc::new(ChainSource::new_esplora( server_url.clone(), + HashMap::new(), sync_config, Arc::clone(&wallet), Arc::clone(&fee_estimator), diff --git a/src/chain/mod.rs b/src/chain/mod.rs index c3d5fdedc..2f8eeaac4 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -233,21 +233,31 @@ pub(crate) enum ChainSource { impl ChainSource { pub(crate) fn new_esplora( - server_url: String, sync_config: EsploraSyncConfig, onchain_wallet: Arc, - fee_estimator: Arc, tx_broadcaster: Arc, - kv_store: Arc, config: Arc, logger: Arc, - node_metrics: Arc>, + server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, ) -> Self { // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating // `lightning-transaction-sync`. We should revert this as part of of the upgrade to LDK 0.2. 
let mut client_builder_0_11 = esplora_client_0_11::Builder::new(&server_url); client_builder_0_11 = client_builder_0_11.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + + for (header_name, header_value) in &headers { + client_builder_0_11 = client_builder_0_11.header(header_name, header_value); + } + let esplora_client_0_11 = client_builder_0_11.build_async().unwrap(); let tx_sync = Arc::new(EsploraSyncClient::from_client(esplora_client_0_11, Arc::clone(&logger))); let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + + for (header_name, header_value) in &headers { + client_builder = client_builder.header(header_name, header_value); + } + let esplora_client = client_builder.build_async().unwrap(); let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); From 7e93a8bc60bf188373f70b65c5f4b30e334be9c4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 24 Jul 2025 15:15:52 +0200 Subject: [PATCH 027/184] Introduce `ChainSourceKind` type We introduce a new `ChainSourceKind` that is held as a field by `ChainSource`, which better encapsulates the chain syncing logic, and in future commits allows us to move some common fields to `ChainSource`. 
--- src/chain/mod.rs | 117 ++++++++++++++++++++++++++++------------------- src/lib.rs | 32 +++++-------- 2 files changed, 81 insertions(+), 68 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 2f8eeaac4..ef844264b 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -187,7 +187,11 @@ impl ElectrumRuntimeStatus { } } -pub(crate) enum ChainSource { +pub(crate) struct ChainSource { + kind: ChainSourceKind, +} + +enum ChainSourceKind { Esplora { sync_config: EsploraSyncConfig, esplora_client: EsploraAsyncClient, @@ -262,7 +266,7 @@ impl ChainSource { let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self::Esplora { + let kind = ChainSourceKind::Esplora { sync_config, esplora_client, onchain_wallet, @@ -275,7 +279,9 @@ impl ChainSource { config, logger, node_metrics, - } + }; + + Self { kind } } pub(crate) fn new_electrum( @@ -287,7 +293,7 @@ impl ChainSource { let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new()); let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self::Electrum { + let kind = ChainSourceKind::Electrum { server_url, sync_config, electrum_runtime_status, @@ -300,7 +306,8 @@ impl ChainSource { config, logger, node_metrics, - } + }; + Self { kind } } pub(crate) fn new_bitcoind_rpc( @@ -319,7 +326,7 @@ impl ChainSource { let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); let latest_chain_tip = RwLock::new(None); let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - Self::Bitcoind { + let kind = ChainSourceKind::Bitcoind { api_client, header_cache, latest_chain_tip, @@ -331,7 +338,8 @@ impl ChainSource { config, logger, node_metrics, - } + }; + Self { kind } } pub(crate) fn new_bitcoind_rest( @@ -354,7 +362,7 @@ impl ChainSource { let latest_chain_tip = RwLock::new(None); let 
wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - Self::Bitcoind { + let kind = ChainSourceKind::Bitcoind { api_client, header_cache, latest_chain_tip, @@ -366,12 +374,19 @@ impl ChainSource { config, logger, node_metrics, - } + }; + Self { kind } } pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { - match self { - Self::Electrum { server_url, electrum_runtime_status, config, logger, .. } => { + match &self.kind { + ChainSourceKind::Electrum { + server_url, + electrum_runtime_status, + config, + logger, + .. + } => { electrum_runtime_status.write().unwrap().start( server_url.clone(), Arc::clone(&runtime), @@ -387,8 +402,8 @@ impl ChainSource { } pub(crate) fn stop(&self) { - match self { - Self::Electrum { electrum_runtime_status, .. } => { + match &self.kind { + ChainSourceKind::Electrum { electrum_runtime_status, .. } => { electrum_runtime_status.write().unwrap().stop(); }, _ => { @@ -398,19 +413,27 @@ impl ChainSource { } pub(crate) fn as_utxo_source(&self) -> Option> { - match self { - Self::Bitcoind { api_client, .. } => Some(api_client.utxo_source()), + match &self.kind { + ChainSourceKind::Bitcoind { api_client, .. } => Some(api_client.utxo_source()), _ => None, } } + pub(crate) fn is_transaction_based(&self) -> bool { + match &self.kind { + ChainSourceKind::Esplora { .. } => true, + ChainSourceKind::Electrum { .. } => true, + ChainSourceKind::Bitcoind { .. } => false, + } + } + pub(crate) async fn continuously_sync_wallets( &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) { - match self { - Self::Esplora { sync_config, logger, .. } => { + match &self.kind { + ChainSourceKind::Esplora { sync_config, logger, .. } => { if let Some(background_sync_config) = sync_config.background_sync_config.as_ref() { self.start_tx_based_sync_loop( stop_sync_receiver, @@ -430,7 +453,7 @@ impl ChainSource { return; } }, - Self::Electrum { sync_config, logger, .. 
} => { + ChainSourceKind::Electrum { sync_config, logger, .. } => { if let Some(background_sync_config) = sync_config.background_sync_config.as_ref() { self.start_tx_based_sync_loop( stop_sync_receiver, @@ -450,7 +473,7 @@ impl ChainSource { return; } }, - Self::Bitcoind { + ChainSourceKind::Bitcoind { api_client, header_cache, latest_chain_tip, @@ -681,8 +704,8 @@ impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { - match self { - Self::Esplora { + match &self.kind { + ChainSourceKind::Esplora { esplora_client, onchain_wallet, onchain_wallet_sync_status, @@ -795,7 +818,7 @@ impl ChainSource { res }, - Self::Electrum { + ChainSourceKind::Electrum { electrum_runtime_status, onchain_wallet, onchain_wallet_sync_status, @@ -887,7 +910,7 @@ impl ChainSource { res }, - Self::Bitcoind { .. } => { + ChainSourceKind::Bitcoind { .. } => { // In BitcoindRpc mode we sync lightning and onchain wallet in one go via // `ChainPoller`. So nothing to do here. unreachable!("Onchain wallet will be synced via chain polling") @@ -901,8 +924,8 @@ impl ChainSource { &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - match self { - Self::Esplora { + match &self.kind { + ChainSourceKind::Esplora { tx_sync, lightning_wallet_sync_status, kv_store, @@ -986,7 +1009,7 @@ impl ChainSource { res }, - Self::Electrum { + ChainSourceKind::Electrum { electrum_runtime_status, lightning_wallet_sync_status, kv_store, @@ -1057,7 +1080,7 @@ impl ChainSource { res }, - Self::Bitcoind { .. } => { + ChainSourceKind::Bitcoind { .. } => { // In BitcoindRpc mode we sync lightning and onchain wallet in one go via // `ChainPoller`. So nothing to do here. 
unreachable!("Lightning wallet will be synced via chain polling") @@ -1069,18 +1092,18 @@ impl ChainSource { &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - match self { - Self::Esplora { .. } => { + match &self.kind { + ChainSourceKind::Esplora { .. } => { // In Esplora mode we sync lightning and onchain wallets via // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here. unreachable!("Listeners will be synced via transction-based syncing") }, - Self::Electrum { .. } => { + ChainSourceKind::Electrum { .. } => { // In Electrum mode we sync lightning and onchain wallets via // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here. unreachable!("Listeners will be synced via transction-based syncing") }, - Self::Bitcoind { + ChainSourceKind::Bitcoind { api_client, header_cache, latest_chain_tip, @@ -1220,8 +1243,8 @@ impl ChainSource { } pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { - match self { - Self::Esplora { + match &self.kind { + ChainSourceKind::Esplora { esplora_client, fee_estimator, config, @@ -1305,7 +1328,7 @@ impl ChainSource { Ok(()) }, - Self::Electrum { + ChainSourceKind::Electrum { electrum_runtime_status, fee_estimator, kv_store, @@ -1350,7 +1373,7 @@ impl ChainSource { Ok(()) }, - Self::Bitcoind { + ChainSourceKind::Bitcoind { api_client, fee_estimator, config, @@ -1483,8 +1506,8 @@ impl ChainSource { } pub(crate) async fn process_broadcast_queue(&self) { - match self { - Self::Esplora { esplora_client, tx_broadcaster, logger, .. } => { + match &self.kind { + ChainSourceKind::Esplora { esplora_client, tx_broadcaster, logger, .. } => { let mut receiver = tx_broadcaster.get_broadcast_queue().await; while let Some(next_package) = receiver.recv().await { for tx in &next_package { @@ -1560,7 +1583,7 @@ impl ChainSource { } } }, - Self::Electrum { electrum_runtime_status, tx_broadcaster, .. 
} => { + ChainSourceKind::Electrum { electrum_runtime_status, tx_broadcaster, .. } => { let electrum_client: Arc = if let Some(client) = electrum_runtime_status.read().unwrap().client().as_ref() { @@ -1580,7 +1603,7 @@ impl ChainSource { } } }, - Self::Bitcoind { api_client, tx_broadcaster, logger, .. } => { + ChainSourceKind::Bitcoind { api_client, tx_broadcaster, logger, .. } => { // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 // features, we should eventually switch to use `submitpackage` via the // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual @@ -1640,21 +1663,21 @@ impl ChainSource { impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - match self { - Self::Esplora { tx_sync, .. } => tx_sync.register_tx(txid, script_pubkey), - Self::Electrum { electrum_runtime_status, .. } => { + match &self.kind { + ChainSourceKind::Esplora { tx_sync, .. } => tx_sync.register_tx(txid, script_pubkey), + ChainSourceKind::Electrum { electrum_runtime_status, .. } => { electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) }, - Self::Bitcoind { .. } => (), + ChainSourceKind::Bitcoind { .. } => (), } } fn register_output(&self, output: lightning::chain::WatchedOutput) { - match self { - Self::Esplora { tx_sync, .. } => tx_sync.register_output(output), - Self::Electrum { electrum_runtime_status, .. } => { + match &self.kind { + ChainSourceKind::Esplora { tx_sync, .. } => tx_sync.register_output(output), + ChainSourceKind::Electrum { electrum_runtime_status, .. } => { electrum_runtime_status.write().unwrap().register_output(output) }, - Self::Bitcoind { .. } => (), + ChainSourceKind::Bitcoind { .. 
} => (), } } } diff --git a/src/lib.rs b/src/lib.rs index 0a53fbbb3..89a17ab03 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1250,27 +1250,17 @@ impl Node { tokio::task::block_in_place(move || { tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( async move { - match chain_source.as_ref() { - ChainSource::Esplora { .. } => { - chain_source.update_fee_rate_estimates().await?; - chain_source - .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper) - .await?; - chain_source.sync_onchain_wallet().await?; - }, - ChainSource::Electrum { .. } => { - chain_source.update_fee_rate_estimates().await?; - chain_source - .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper) - .await?; - chain_source.sync_onchain_wallet().await?; - }, - ChainSource::Bitcoind { .. } => { - chain_source.update_fee_rate_estimates().await?; - chain_source - .poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper) - .await?; - }, + if chain_source.is_transaction_based() { + chain_source.update_fee_rate_estimates().await?; + chain_source + .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper) + .await?; + chain_source.sync_onchain_wallet().await?; + } else { + chain_source.update_fee_rate_estimates().await?; + chain_source + .poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper) + .await?; } Ok(()) }, From 5afe490b4f7bd73048d3b7c4d9d506af7eef8fd1 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Jul 2025 08:58:15 +0200 Subject: [PATCH 028/184] Intermittently introduce additional `impl` blocks .. in the hopes of making the git diff more readable going forward, we break up the `ChainSource` impl block. 
--- src/chain/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index ef844264b..f75837cde 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -700,7 +700,9 @@ impl ChainSource { } } } +} +impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { @@ -917,7 +919,9 @@ impl ChainSource { }, } } +} +impl ChainSource { // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) pub(crate) async fn sync_lightning_wallet( @@ -1087,7 +1091,9 @@ impl ChainSource { }, } } +} +impl ChainSource { pub(crate) async fn poll_and_update_listeners( &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, @@ -1241,7 +1247,9 @@ impl ChainSource { }, } } +} +impl ChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { match &self.kind { ChainSourceKind::Esplora { @@ -1504,7 +1512,9 @@ impl ChainSource { }, } } +} +impl ChainSource { pub(crate) async fn process_broadcast_queue(&self) { match &self.kind { ChainSourceKind::Esplora { esplora_client, tx_broadcaster, logger, .. } => { From 56167cecc03814fa42e440cfd1f90e771aaa4d57 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 24 Jul 2025 15:01:18 +0200 Subject: [PATCH 029/184] Move Esplora sync logic to a `EsploraChainSource` type We refactor our `ChainSource` logic and move out the Esplora code into a new object. --- src/builder.rs | 6 +- src/chain/mod.rs | 749 +++++++++++++++++++++++++---------------------- src/config.rs | 6 + 3 files changed, 403 insertions(+), 358 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 30a1649d2..85ec70d18 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -5,11 +5,11 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL}; +use crate::chain::ChainSource; use crate::config::{ default_user_config, may_announce_channel, AnnounceError, BitcoindRestClientConfig, Config, - ElectrumSyncConfig, EsploraSyncConfig, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, - WALLET_KEYS_SEED_LEN, + ElectrumSyncConfig, EsploraSyncConfig, DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, + DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, }; use crate::connection::ConnectionManager; diff --git a/src/chain/mod.rs b/src/chain/mod.rs index f75837cde..4544a6d3a 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -15,9 +15,9 @@ use crate::chain::electrum::ElectrumRuntimeClient; use crate::config::{ BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, - RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, TX_BROADCAST_TIMEOUT_SECS, - WALLET_SYNC_INTERVAL_MINIMUM_SECS, + DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, + LDK_WALLET_SYNC_TIMEOUT_SECS, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, + TX_BROADCAST_TIMEOUT_SECS, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, @@ -32,30 +32,24 @@ use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarge use lightning::chain::{Confirm, Filter, Listen, WatchedOutput}; use lightning::util::ser::Writeable; -use lightning_transaction_sync::EsploraSyncClient; - use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; -use bdk_esplora::EsploraAsyncExt; -use bdk_wallet::Update as BdkUpdate; 
+use lightning_transaction_sync::EsploraSyncClient; +use bdk_esplora::EsploraAsyncExt; use esplora_client::AsyncClient as EsploraAsyncClient; +use bdk_wallet::Update as BdkUpdate; + use bitcoin::{FeeRate, Network, Script, ScriptBuf, Txid}; use std::collections::HashMap; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -// The default Esplora server we're using. -pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; - -// The default Esplora client timeout we're using. -pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; - const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; pub(crate) enum WalletSyncStatus { @@ -187,25 +181,77 @@ impl ElectrumRuntimeStatus { } } +pub(super) struct EsploraChainSource { + pub(super) sync_config: EsploraSyncConfig, + esplora_client: EsploraAsyncClient, + onchain_wallet: Arc, + onchain_wallet_sync_status: Mutex, + tx_sync: Arc>>, + lightning_wallet_sync_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, +} + +impl EsploraChainSource { + pub(crate) fn new( + server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating + // `lightning-transaction-sync`. We should revert this as part of of the upgrade to LDK 0.2. 
+ let mut client_builder_0_11 = esplora_client_0_11::Builder::new(&server_url); + client_builder_0_11 = client_builder_0_11.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + + for (header_name, header_value) in &headers { + client_builder_0_11 = client_builder_0_11.header(header_name, header_value); + } + + let esplora_client_0_11 = client_builder_0_11.build_async().unwrap(); + let tx_sync = + Arc::new(EsploraSyncClient::from_client(esplora_client_0_11, Arc::clone(&logger))); + + let mut client_builder = esplora_client::Builder::new(&server_url); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + + for (header_name, header_value) in &headers { + client_builder = client_builder.header(header_name, header_value); + } + + let esplora_client = client_builder.build_async().unwrap(); + + let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self { + sync_config, + esplora_client, + onchain_wallet, + onchain_wallet_sync_status, + tx_sync, + lightning_wallet_sync_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger, + node_metrics, + } + } +} + + pub(crate) struct ChainSource { kind: ChainSourceKind, + logger: Arc, } enum ChainSourceKind { - Esplora { - sync_config: EsploraSyncConfig, - esplora_client: EsploraAsyncClient, - onchain_wallet: Arc, - onchain_wallet_sync_status: Mutex, - tx_sync: Arc>>, - lightning_wallet_sync_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, - }, + Esplora(EsploraChainSource), Electrum { server_url: String, sync_config: ElectrumSyncConfig, @@ -242,46 +288,20 @@ impl ChainSource { tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, ) -> Self { - // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating - // `lightning-transaction-sync`. 
We should revert this as part of of the upgrade to LDK 0.2. - let mut client_builder_0_11 = esplora_client_0_11::Builder::new(&server_url); - client_builder_0_11 = client_builder_0_11.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - - for (header_name, header_value) in &headers { - client_builder_0_11 = client_builder_0_11.header(header_name, header_value); - } - - let esplora_client_0_11 = client_builder_0_11.build_async().unwrap(); - let tx_sync = - Arc::new(EsploraSyncClient::from_client(esplora_client_0_11, Arc::clone(&logger))); - - let mut client_builder = esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - - for (header_name, header_value) in &headers { - client_builder = client_builder.header(header_name, header_value); - } - - let esplora_client = client_builder.build_async().unwrap(); - - let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let kind = ChainSourceKind::Esplora { + let esplora_chain_source = EsploraChainSource::new( + server_url, + headers, sync_config, - esplora_client, onchain_wallet, - onchain_wallet_sync_status, - tx_sync, - lightning_wallet_sync_status, fee_estimator, tx_broadcaster, kv_store, config, - logger, + Arc::clone(&logger), node_metrics, - }; - - Self { kind } + ); + let kind = ChainSourceKind::Esplora(esplora_chain_source); + Self { kind, logger } } pub(crate) fn new_electrum( @@ -304,10 +324,10 @@ impl ChainSource { tx_broadcaster, kv_store, config, - logger, + logger: Arc::clone(&logger), node_metrics, }; - Self { kind } + Self { kind, logger } } pub(crate) fn new_bitcoind_rpc( @@ -336,10 +356,10 @@ impl ChainSource { tx_broadcaster, kv_store, config, - logger, + logger: Arc::clone(&logger), node_metrics, }; - Self { kind } + Self { kind, logger } } pub(crate) fn new_bitcoind_rest( @@ -372,10 +392,10 @@ impl ChainSource { tx_broadcaster, kv_store, 
config, - logger, + logger: Arc::clone(&logger), node_metrics, }; - Self { kind } + Self { kind, logger } } pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { @@ -421,7 +441,7 @@ impl ChainSource { pub(crate) fn is_transaction_based(&self) -> bool { match &self.kind { - ChainSourceKind::Esplora { .. } => true, + ChainSourceKind::Esplora(_) => true, ChainSourceKind::Electrum { .. } => true, ChainSourceKind::Bitcoind { .. } => false, } @@ -433,21 +453,23 @@ impl ChainSource { output_sweeper: Arc, ) { match &self.kind { - ChainSourceKind::Esplora { sync_config, logger, .. } => { - if let Some(background_sync_config) = sync_config.background_sync_config.as_ref() { + ChainSourceKind::Esplora(esplora_chain_source) => { + if let Some(background_sync_config) = + esplora_chain_source.sync_config.background_sync_config.as_ref() + { self.start_tx_based_sync_loop( stop_sync_receiver, channel_manager, chain_monitor, output_sweeper, background_sync_config, - Arc::clone(&logger), + Arc::clone(&self.logger), ) .await } else { // Background syncing is disabled log_info!( - logger, + self.logger, "Background syncing is disabled. Manual syncing required for onchain wallet, lightning wallet, and fee rate updates.", ); return; @@ -467,7 +489,7 @@ impl ChainSource { } else { // Background syncing is disabled log_info!( - logger, + self.logger, "Background syncing is disabled. Manual syncing required for onchain wallet, lightning wallet, and fee rate updates.", ); return; @@ -702,48 +724,36 @@ impl ChainSource { } } -impl ChainSource { - // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, - // etc.) - pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { - match &self.kind { - ChainSourceKind::Esplora { - esplora_client, - onchain_wallet, - onchain_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. 
- } => { - let receiver_res = { - let mut status_lock = onchain_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } +impl EsploraChainSource { + pub(super) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } - let res = { - // If this is our first sync, do a full scan with the configured gap limit. - // Otherwise just do an incremental sync. - let incremental_sync = - node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + let res = { + // If this is our first sync, do a full scan with the configured gap limit. + // Otherwise just do an incremental sync. + let incremental_sync = + self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); - macro_rules! get_and_apply_wallet_update { + macro_rules! 
get_and_apply_wallet_update { ($sync_future: expr) => {{ let now = Instant::now(); match $sync_future.await { Ok(res) => match res { - Ok(update) => match onchain_wallet.apply_update(update) { + Ok(update) => match self.onchain_wallet.apply_update(update) { Ok(()) => { log_info!( - logger, + self.logger, "{} of on-chain wallet finished in {}ms.", if incremental_sync { "Incremental sync" } else { "Sync" }, now.elapsed().as_millis() @@ -753,9 +763,13 @@ impl ChainSource { .ok() .map(|d| d.as_secs()); { - let mut locked_node_metrics = node_metrics.write().unwrap(); + let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), Arc::clone(&logger))?; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger) + )?; } Ok(()) }, @@ -764,7 +778,7 @@ impl ChainSource { Err(e) => match *e { esplora_client::Error::Reqwest(he) => { log_error!( - logger, + self.logger, "{} of on-chain wallet failed due to HTTP connection error: {}", if incremental_sync { "Incremental sync" } else { "Sync" }, he @@ -773,7 +787,7 @@ impl ChainSource { }, _ => { log_error!( - logger, + self.logger, "{} of on-chain wallet failed due to Esplora error: {}", if incremental_sync { "Incremental sync" } else { "Sync" }, e @@ -784,7 +798,7 @@ impl ChainSource { }, Err(e) => { log_error!( - logger, + self.logger, "{} of on-chain wallet timed out: {}", if incremental_sync { "Incremental sync" } else { "Sync" }, e @@ -795,30 +809,40 @@ impl ChainSource { }} } - if incremental_sync { - let sync_request = onchain_wallet.get_incremental_sync_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } else { - let full_scan_request = 
onchain_wallet.get_full_scan_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - esplora_client.full_scan( - full_scan_request, - BDK_CLIENT_STOP_GAP, - BDK_CLIENT_CONCURRENCY, - ), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } - }; + if incremental_sync { + let sync_request = self.onchain_wallet.get_incremental_sync_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } else { + let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } + }; - onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - res + res + } +} + +impl ChainSource { + // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, + // etc.) + pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.sync_onchain_wallet().await }, ChainSourceKind::Electrum { electrum_runtime_status, @@ -921,97 +945,99 @@ impl ChainSource { } } -impl ChainSource { - // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, - // etc.) 
- pub(crate) async fn sync_lightning_wallet( +impl EsploraChainSource { + pub(super) async fn sync_lightning_wallet( &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - match &self.kind { - ChainSourceKind::Esplora { - tx_sync, - lightning_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. - } => { - let sync_cman = Arc::clone(&channel_manager); - let sync_cmon = Arc::clone(&chain_monitor); - let sync_sweeper = Arc::clone(&output_sweeper); - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + &*sync_cman as &(dyn Confirm + Sync + Send), + &*sync_cmon as &(dyn Confirm + Sync + Send), + &*sync_sweeper as &(dyn Confirm + Sync + Send), + ]; + + let receiver_res = { + let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + let res = { + let timeout_fut = tokio::time::timeout( + Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + self.tx_sync.sync(confirmables), + ); + let now = Instant::now(); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_info!( + self.logger, + "Sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() + ); - let receiver_res = { - let mut status_lock = lightning_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - 
}; - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - let res = { - let timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), - tx_sync.sync(confirmables), - ); - let now = Instant::now(); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_info!( - logger, - "Sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - } + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; + Ok(()) + }, + Err(e) => { + log_error!(self.logger, "Sync of Lightning wallet failed: {}", e); + Err(e.into()) + }, + }, + Err(e) => { + log_error!(self.logger, "Lightning wallet sync timed out: {}", e); + Err(Error::TxSyncTimeout) + }, + } + }; - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - 
Arc::clone(&kv_store), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )?; - Ok(()) - }, - Err(e) => { - log_error!(logger, "Sync of Lightning wallet failed: {}", e); - Err(e.into()) - }, - }, - Err(e) => { - log_error!(logger, "Lightning wallet sync timed out: {}", e); - Err(Error::TxSyncTimeout) - }, - } - }; + self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + res + } +} - res +impl ChainSource { + // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, + // etc.) + pub(crate) async fn sync_lightning_wallet( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source + .sync_lightning_wallet(channel_manager, chain_monitor, output_sweeper) + .await }, ChainSourceKind::Electrum { electrum_runtime_status, @@ -1249,92 +1275,89 @@ impl ChainSource { } } -impl ChainSource { +impl EsploraChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { - match &self.kind { - ChainSourceKind::Esplora { - esplora_client, - fee_estimator, - config, - kv_store, - logger, - node_metrics, - .. - } => { - let now = Instant::now(); - let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - esplora_client.get_fee_estimates(), - ) - .await - .map_err(|e| { - log_error!(logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })? - .map_err(|e| { - log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); - Error::FeerateEstimationUpdateFailed - })?; - - if estimates.is_empty() && config.network == Network::Bitcoin { - // Ensure we fail if we didn't receive any estimates. 
- log_error!( - logger, + let now = Instant::now(); + let estimates = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + self.esplora_client.get_fee_estimates(), + ) + .await + .map_err(|e| { + log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })? + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); + Error::FeerateEstimationUpdateFailed + })?; + + if estimates.is_empty() && self.config.network == Network::Bitcoin { + // Ensure we fail if we didn't receive any estimates. + log_error!( + self.logger, "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", ); - return Err(Error::FeerateEstimationUpdateFailed); - } + return Err(Error::FeerateEstimationUpdateFailed); + } - let confirmation_targets = get_all_conf_targets(); + let confirmation_targets = get_all_conf_targets(); - let mut new_fee_rate_cache = HashMap::with_capacity(10); - for target in confirmation_targets { - let num_blocks = get_num_block_defaults_for_target(target); + let mut new_fee_rate_cache = HashMap::with_capacity(10); + for target in confirmation_targets { + let num_blocks = get_num_block_defaults_for_target(target); - // Convert the retrieved fee rate and fall back to 1 sat/vb if we fail or it - // yields less than that. This is mostly necessary to continue on - // `signet`/`regtest` where we might not get estimates (or bogus values). - let converted_estimate_sat_vb = - esplora_client::convert_fee_rate(num_blocks, estimates.clone()) - .map_or(1.0, |converted| converted.max(1.0)); + // Convert the retrieved fee rate and fall back to 1 sat/vb if we fail or it + // yields less than that. This is mostly necessary to continue on + // `signet`/`regtest` where we might not get estimates (or bogus values). 
+ let converted_estimate_sat_vb = + esplora_client::convert_fee_rate(num_blocks, estimates.clone()) + .map_or(1.0, |converted| converted.max(1.0)); - let fee_rate = - FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); + let fee_rate = FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. - let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - new_fee_rate_cache.insert(target, adjusted_fee_rate); + new_fee_rate_cache.insert(target, adjusted_fee_rate); - log_trace!( - logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.to_sat_per_kwu(), - ); - } + log_trace!( + self.logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } - fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - log_info!( - logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - } + log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut 
locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } - Ok(()) + Ok(()) + } +} + +impl ChainSource { + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.update_fee_rate_estimates().await }, ChainSourceKind::Electrum { electrum_runtime_status, @@ -1514,84 +1537,87 @@ impl ChainSource { } } -impl ChainSource { +impl EsploraChainSource { pub(crate) async fn process_broadcast_queue(&self) { - match &self.kind { - ChainSourceKind::Esplora { esplora_client, tx_broadcaster, logger, .. } => { - let mut receiver = tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - esplora_client.broadcast(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in &next_package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.esplora_client.broadcast(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); + }, + Err(e) => match e { + esplora_client::Error::HttpResponse { status, message } => { + if status == 400 { + // Log 400 at lesser level, as this often just means bitcoind already knows the + // transaction. 
+ // FIXME: We can further differentiate here based on the error + // message which will be available with rust-esplora-client 0.7 and + // later. log_trace!( - logger, - "Successfully broadcast transaction {}", - txid + self.logger, + "Failed to broadcast due to HTTP connection error: {}", + message ); - }, - Err(e) => match e { - esplora_client::Error::HttpResponse { status, message } => { - if status == 400 { - // Log 400 at lesser level, as this often just means bitcoind already knows the - // transaction. - // FIXME: We can further differentiate here based on the error - // message which will be available with rust-esplora-client 0.7 and - // later. - log_trace!( - logger, - "Failed to broadcast due to HTTP connection error: {}", - message - ); - } else { - log_error!( - logger, - "Failed to broadcast due to HTTP connection error: {} - {}", - status, message - ); - } - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - _ => { - log_error!( - logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, + } else { + log_error!( + self.logger, + "Failed to broadcast due to HTTP connection error: {} - {}", + status, + message + ); + } + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); }, - Err(e) => { + _ => { log_error!( - logger, - "Failed to broadcast transaction due to timeout {}: {}", + self.logger, + "Failed to broadcast transaction {}: {}", txid, e ); log_trace!( - logger, + self.logger, "Failed broadcast transaction bytes: {}", log_bytes!(tx.encode()) ); }, - } - } + }, + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, } + } + } + } +} + +impl ChainSource { 
+ pub(crate) async fn process_broadcast_queue(&self) { + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.process_broadcast_queue().await }, ChainSourceKind::Electrum { electrum_runtime_status, tx_broadcaster, .. } => { let electrum_client: Arc = if let Some(client) = @@ -1671,10 +1697,21 @@ impl ChainSource { } } +impl Filter for EsploraChainSource { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.tx_sync.register_tx(txid, script_pubkey); + } + fn register_output(&self, output: WatchedOutput) { + self.tx_sync.register_output(output); + } +} + impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { match &self.kind { - ChainSourceKind::Esplora { tx_sync, .. } => tx_sync.register_tx(txid, script_pubkey), + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.register_tx(txid, script_pubkey) + }, ChainSourceKind::Electrum { electrum_runtime_status, .. } => { electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) }, @@ -1683,7 +1720,9 @@ impl Filter for ChainSource { } fn register_output(&self, output: lightning::chain::WatchedOutput) { match &self.kind { - ChainSourceKind::Esplora { tx_sync, .. } => tx_sync.register_output(output), + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.register_output(output) + }, ChainSourceKind::Electrum { electrum_runtime_status, .. } => { electrum_runtime_status.write().unwrap().register_output(output) }, diff --git a/src/config.rs b/src/config.rs index 7b7ed8156..a5048e64f 100644 --- a/src/config.rs +++ b/src/config.rs @@ -39,6 +39,12 @@ pub const DEFAULT_LOG_FILENAME: &'static str = "ldk_node.log"; /// The default storage directory. pub const DEFAULT_STORAGE_DIR_PATH: &str = "/tmp/ldk_node"; +// The default Esplora server we're using. 
+pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; + +// The default Esplora client timeout we're using. +pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; + // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold // number of derivation indexes after which BDK stops looking for new scripts belonging to the wallet. pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; From 8a01e17c9d3a7b74838c0ba0bfe7d15da2074e01 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Jul 2025 09:09:21 +0200 Subject: [PATCH 030/184] Move `EsploraChainSource` to a new module `chain::esplora` --- src/chain/esplora.rs | 448 +++++++++++++++++++++++++++++++++++++++++++ src/chain/mod.rs | 431 +---------------------------------------- 2 files changed, 451 insertions(+), 428 deletions(-) create mode 100644 src/chain/esplora.rs diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs new file mode 100644 index 000000000..3a911394c --- /dev/null +++ b/src/chain/esplora.rs @@ -0,0 +1,448 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; + +use crate::config::{ + Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, + BDK_WALLET_SYNC_TIMEOUT_SECS, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, +}; +use crate::fee_estimator::{ + apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, + OnchainFeeEstimator, +}; +use crate::io::utils::write_node_metrics; +use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::{Error, NodeMetrics}; + +use lightning::chain::{Confirm, Filter, WatchedOutput}; +use lightning::util::ser::Writeable; + +use lightning_transaction_sync::EsploraSyncClient; + +use bdk_esplora::EsploraAsyncExt; + +use esplora_client::AsyncClient as EsploraAsyncClient; + +use bitcoin::{FeeRate, Network, Script, Txid}; + +use std::collections::HashMap; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +pub(super) struct EsploraChainSource { + pub(super) sync_config: EsploraSyncConfig, + esplora_client: EsploraAsyncClient, + onchain_wallet: Arc, + onchain_wallet_sync_status: Mutex, + tx_sync: Arc>>, + lightning_wallet_sync_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, +} + +impl EsploraChainSource { + pub(crate) fn new( + server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating + // `lightning-transaction-sync`. We should revert this as part of of the upgrade to LDK 0.2. 
+ let mut client_builder_0_11 = esplora_client_0_11::Builder::new(&server_url); + client_builder_0_11 = client_builder_0_11.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + + for (header_name, header_value) in &headers { + client_builder_0_11 = client_builder_0_11.header(header_name, header_value); + } + + let esplora_client_0_11 = client_builder_0_11.build_async().unwrap(); + let tx_sync = + Arc::new(EsploraSyncClient::from_client(esplora_client_0_11, Arc::clone(&logger))); + + let mut client_builder = esplora_client::Builder::new(&server_url); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + + for (header_name, header_value) in &headers { + client_builder = client_builder.header(header_name, header_value); + } + + let esplora_client = client_builder.build_async().unwrap(); + + let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self { + sync_config, + esplora_client, + onchain_wallet, + onchain_wallet_sync_status, + tx_sync, + lightning_wallet_sync_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger, + node_metrics, + } + } + + pub(super) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let res = { + // If this is our first sync, do a full scan with the configured gap limit. + // Otherwise just do an incremental sync. 
+ let incremental_sync = + self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + + macro_rules! get_and_apply_wallet_update { + ($sync_future: expr) => {{ + let now = Instant::now(); + match $sync_future.await { + Ok(res) => match res { + Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + self.logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger) + )?; + } + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + log_error!( + self.logger, + "{} of on-chain wallet failed due to HTTP connection error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + he + ); + Err(Error::WalletOperationFailed) + }, + _ => { + log_error!( + self.logger, + "{} of on-chain wallet failed due to Esplora error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationFailed) + }, + }, + }, + Err(e) => { + log_error!( + self.logger, + "{} of on-chain wallet timed out: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationTimeout) + }, + } + }} + } + + if incremental_sync { + let sync_request = self.onchain_wallet.get_incremental_sync_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } else { + let full_scan_request 
= self.onchain_wallet.get_full_scan_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } + }; + + self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + pub(super) async fn sync_lightning_wallet( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + &*sync_cman as &(dyn Confirm + Sync + Send), + &*sync_cmon as &(dyn Confirm + Sync + Send), + &*sync_sweeper as &(dyn Confirm + Sync + Send), + ]; + + let receiver_res = { + let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + let res = { + let timeout_fut = tokio::time::timeout( + Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + self.tx_sync.sync(confirmables), + ); + let now = Instant::now(); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_info!( + self.logger, + "Sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() + ); + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + 
unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; + Ok(()) + }, + Err(e) => { + log_error!(self.logger, "Sync of Lightning wallet failed: {}", e); + Err(e.into()) + }, + }, + Err(e) => { + log_error!(self.logger, "Lightning wallet sync timed out: {}", e); + Err(Error::TxSyncTimeout) + }, + } + }; + + self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + let now = Instant::now(); + let estimates = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + self.esplora_client.get_fee_estimates(), + ) + .await + .map_err(|e| { + log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })? + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); + Error::FeerateEstimationUpdateFailed + })?; + + if estimates.is_empty() && self.config.network == Network::Bitcoin { + // Ensure we fail if we didn't receive any estimates. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", + ); + return Err(Error::FeerateEstimationUpdateFailed); + } + + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + for target in confirmation_targets { + let num_blocks = get_num_block_defaults_for_target(target); + + // Convert the retrieved fee rate and fall back to 1 sat/vb if we fail or it + // yields less than that. This is mostly necessary to continue on + // `signet`/`regtest` where we might not get estimates (or bogus values). 
+ let converted_estimate_sat_vb = + esplora_client::convert_fee_rate(num_blocks, estimates.clone()) + .map_or(1.0, |converted| converted.max(1.0)); + + let fee_rate = FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + self.logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + + log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + Ok(()) + } + + pub(crate) async fn process_broadcast_queue(&self) { + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in &next_package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.esplora_client.broadcast(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); + }, + Err(e) => match e { + esplora_client::Error::HttpResponse { status, message } => { + if status == 400 { + // Log 400 at lesser level, as this often just means bitcoind already knows the + // transaction. 
+ // FIXME: We can further differentiate here based on the error + // message which will be available with rust-esplora-client 0.7 and + // later. + log_trace!( + self.logger, + "Failed to broadcast due to HTTP connection error: {}", + message + ); + } else { + log_error!( + self.logger, + "Failed to broadcast due to HTTP connection error: {} - {}", + status, + message + ); + } + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + _ => { + log_error!( + self.logger, + "Failed to broadcast transaction {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + }, + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + } + } + } + } +} + +impl Filter for EsploraChainSource { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.tx_sync.register_tx(txid, script_pubkey); + } + fn register_output(&self, output: WatchedOutput) { + self.tx_sync.register_output(output); + } +} diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 4544a6d3a..5674bad8b 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -7,16 +7,16 @@ mod bitcoind; mod electrum; +mod esplora; use crate::chain::bitcoind::{ BitcoindClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, }; use crate::chain::electrum::ElectrumRuntimeClient; +use crate::chain::esplora::EsploraChainSource; use crate::config::{ BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, - BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, - LDK_WALLET_SYNC_TIMEOUT_SECS, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, 
RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, TX_BROADCAST_TIMEOUT_SECS, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; use crate::fee_estimator::{ @@ -37,11 +37,6 @@ use lightning_block_sync::init::{synchronize_listeners, validate_best_block_head use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; -use lightning_transaction_sync::EsploraSyncClient; - -use bdk_esplora::EsploraAsyncExt; -use esplora_client::AsyncClient as EsploraAsyncClient; - use bdk_wallet::Update as BdkUpdate; use bitcoin::{FeeRate, Network, Script, ScriptBuf, Txid}; @@ -181,70 +176,6 @@ impl ElectrumRuntimeStatus { } } -pub(super) struct EsploraChainSource { - pub(super) sync_config: EsploraSyncConfig, - esplora_client: EsploraAsyncClient, - onchain_wallet: Arc, - onchain_wallet_sync_status: Mutex, - tx_sync: Arc>>, - lightning_wallet_sync_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, -} - -impl EsploraChainSource { - pub(crate) fn new( - server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, - ) -> Self { - // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating - // `lightning-transaction-sync`. We should revert this as part of of the upgrade to LDK 0.2. 
- let mut client_builder_0_11 = esplora_client_0_11::Builder::new(&server_url); - client_builder_0_11 = client_builder_0_11.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - - for (header_name, header_value) in &headers { - client_builder_0_11 = client_builder_0_11.header(header_name, header_value); - } - - let esplora_client_0_11 = client_builder_0_11.build_async().unwrap(); - let tx_sync = - Arc::new(EsploraSyncClient::from_client(esplora_client_0_11, Arc::clone(&logger))); - - let mut client_builder = esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - - for (header_name, header_value) in &headers { - client_builder = client_builder.header(header_name, header_value); - } - - let esplora_client = client_builder.build_async().unwrap(); - - let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self { - sync_config, - esplora_client, - onchain_wallet, - onchain_wallet_sync_status, - tx_sync, - lightning_wallet_sync_status, - fee_estimator, - tx_broadcaster, - kv_store, - config, - logger, - node_metrics, - } - } -} - - pub(crate) struct ChainSource { kind: ChainSourceKind, logger: Arc, @@ -724,118 +655,6 @@ impl ChainSource { } } -impl EsploraChainSource { - pub(super) async fn sync_onchain_wallet(&self) -> Result<(), Error> { - let receiver_res = { - let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - let res = { - // If this is our first sync, do a full scan with the configured 
gap limit. - // Otherwise just do an incremental sync. - let incremental_sync = - self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); - - macro_rules! get_and_apply_wallet_update { - ($sync_future: expr) => {{ - let now = Instant::now(); - match $sync_future.await { - Ok(res) => match res { - Ok(update) => match self.onchain_wallet.apply_update(update) { - Ok(()) => { - log_info!( - self.logger, - "{} of on-chain wallet finished in {}ms.", - if incremental_sync { "Incremental sync" } else { "Sync" }, - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger) - )?; - } - Ok(()) - }, - Err(e) => Err(e), - }, - Err(e) => match *e { - esplora_client::Error::Reqwest(he) => { - log_error!( - self.logger, - "{} of on-chain wallet failed due to HTTP connection error: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - he - ); - Err(Error::WalletOperationFailed) - }, - _ => { - log_error!( - self.logger, - "{} of on-chain wallet failed due to Esplora error: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - e - ); - Err(Error::WalletOperationFailed) - }, - }, - }, - Err(e) => { - log_error!( - self.logger, - "{} of on-chain wallet timed out: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - e - ); - Err(Error::WalletOperationTimeout) - }, - } - }} - } - - if incremental_sync { - let sync_request = self.onchain_wallet.get_incremental_sync_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), - ); - 
get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } else { - let full_scan_request = self.onchain_wallet.get_full_scan_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - self.esplora_client.full_scan( - full_scan_request, - BDK_CLIENT_STOP_GAP, - BDK_CLIENT_CONCURRENCY, - ), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } - }; - - self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res - } -} - impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) @@ -945,87 +764,6 @@ impl ChainSource { } } -impl EsploraChainSource { - pub(super) async fn sync_lightning_wallet( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, - ) -> Result<(), Error> { - let sync_cman = Arc::clone(&channel_manager); - let sync_cmon = Arc::clone(&chain_monitor); - let sync_sweeper = Arc::clone(&output_sweeper); - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - - let receiver_res = { - let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - let res = { - let timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), - self.tx_sync.sync(confirmables), - ); - let now = Instant::now(); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_info!( - self.logger, - "Sync of Lightning wallet finished 
in {}ms.", - now.elapsed().as_millis() - ); - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; - } - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), - )?; - Ok(()) - }, - Err(e) => { - log_error!(self.logger, "Sync of Lightning wallet failed: {}", e); - Err(e.into()) - }, - }, - Err(e) => { - log_error!(self.logger, "Lightning wallet sync timed out: {}", e); - Err(Error::TxSyncTimeout) - }, - } - }; - - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res - } -} - impl ChainSource { // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) @@ -1275,84 +1013,6 @@ impl ChainSource { } } -impl EsploraChainSource { - pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { - let now = Instant::now(); - let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - self.esplora_client.get_fee_estimates(), - ) - .await - .map_err(|e| { - log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })? - .map_err(|e| { - log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); - Error::FeerateEstimationUpdateFailed - })?; - - if estimates.is_empty() && self.config.network == Network::Bitcoin { - // Ensure we fail if we didn't receive any estimates. 
- log_error!( - self.logger, - "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", - ); - return Err(Error::FeerateEstimationUpdateFailed); - } - - let confirmation_targets = get_all_conf_targets(); - - let mut new_fee_rate_cache = HashMap::with_capacity(10); - for target in confirmation_targets { - let num_blocks = get_num_block_defaults_for_target(target); - - // Convert the retrieved fee rate and fall back to 1 sat/vb if we fail or it - // yields less than that. This is mostly necessary to continue on - // `signet`/`regtest` where we might not get estimates (or bogus values). - let converted_estimate_sat_vb = - esplora_client::convert_fee_rate(num_blocks, estimates.clone()) - .map_or(1.0, |converted| converted.max(1.0)); - - let fee_rate = FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); - - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. 
- let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - - new_fee_rate_cache.insert(target, adjusted_fee_rate); - - log_trace!( - self.logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.to_sat_per_kwu(), - ); - } - - self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - - log_info!( - self.logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; - } - - Ok(()) - } -} - impl ChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { match &self.kind { @@ -1537,82 +1197,6 @@ impl ChainSource { } } -impl EsploraChainSource { - pub(crate) async fn process_broadcast_queue(&self) { - let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - self.esplora_client.broadcast(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_trace!(self.logger, "Successfully broadcast transaction {}", txid); - }, - Err(e) => match e { - esplora_client::Error::HttpResponse { status, message } => { - if status == 400 { - // Log 400 at lesser level, as this often just means bitcoind already knows the - // transaction. - // FIXME: We can further differentiate here based on the error - // message which will be available with rust-esplora-client 0.7 and - // later. 
- log_trace!( - self.logger, - "Failed to broadcast due to HTTP connection error: {}", - message - ); - } else { - log_error!( - self.logger, - "Failed to broadcast due to HTTP connection error: {} - {}", - status, - message - ); - } - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - _ => { - log_error!( - self.logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, - }, - Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - } - } - } - } -} - impl ChainSource { pub(crate) async fn process_broadcast_queue(&self) { match &self.kind { @@ -1697,15 +1281,6 @@ impl ChainSource { } } -impl Filter for EsploraChainSource { - fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - self.tx_sync.register_tx(txid, script_pubkey); - } - fn register_output(&self, output: WatchedOutput) { - self.tx_sync.register_output(output); - } -} - impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { match &self.kind { From fd517ed951905dec2c2b72448a9cc2229a636dd5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 24 Jul 2025 16:01:45 +0200 Subject: [PATCH 031/184] Move Electrum sync logic to a `ElectrumChainSource` type We refactor our `ChainSource` logic and move out the Electrum code into a new object. 
--- src/chain/electrum.rs | 92 +++++- src/chain/mod.rs | 633 ++++++++++++++++++++---------------------- 2 files changed, 378 insertions(+), 347 deletions(-) diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 9882e652b..844e46187 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -32,7 +32,7 @@ use electrum_client::Client as ElectrumClient; use electrum_client::ConfigBuilder as ElectrumConfigBuilder; use electrum_client::{Batch, ElectrumApi}; -use bitcoin::{FeeRate, Network, Script, Transaction, Txid}; +use bitcoin::{FeeRate, Network, Script, ScriptBuf, Transaction, Txid}; use std::collections::HashMap; use std::sync::Arc; @@ -42,7 +42,83 @@ const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5; const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3; const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10; -pub(crate) struct ElectrumRuntimeClient { +pub(super) enum ElectrumRuntimeStatus { + Started(Arc), + Stopped { + pending_registered_txs: Vec<(Txid, ScriptBuf)>, + pending_registered_outputs: Vec, + }, +} + +impl ElectrumRuntimeStatus { + pub(super) fn new() -> Self { + let pending_registered_txs = Vec::new(); + let pending_registered_outputs = Vec::new(); + Self::Stopped { pending_registered_txs, pending_registered_outputs } + } + + pub(super) fn start( + &mut self, server_url: String, runtime: Arc, config: Arc, + logger: Arc, + ) -> Result<(), Error> { + match self { + Self::Stopped { pending_registered_txs, pending_registered_outputs } => { + let client = Arc::new(ElectrumRuntimeClient::new( + server_url.clone(), + runtime, + config, + logger, + )?); + + // Apply any pending `Filter` entries + for (txid, script_pubkey) in pending_registered_txs.drain(..) { + client.register_tx(&txid, &script_pubkey); + } + + for output in pending_registered_outputs.drain(..) 
{ + client.register_output(output) + } + + *self = Self::Started(client); + }, + Self::Started(_) => { + debug_assert!(false, "We shouldn't call start if we're already started") + }, + } + Ok(()) + } + + pub(super) fn stop(&mut self) { + *self = Self::new() + } + + pub(super) fn client(&self) -> Option> { + match self { + Self::Started(client) => Some(Arc::clone(&client)), + Self::Stopped { .. } => None, + } + } + + pub(super) fn register_tx(&mut self, txid: &Txid, script_pubkey: &Script) { + match self { + Self::Started(client) => client.register_tx(txid, script_pubkey), + Self::Stopped { pending_registered_txs, .. } => { + pending_registered_txs.push((*txid, script_pubkey.to_owned())) + }, + } + } + + pub(super) fn register_output(&mut self, output: WatchedOutput) { + match self { + Self::Started(client) => client.register_output(output), + Self::Stopped { pending_registered_outputs, .. } => { + pending_registered_outputs.push(output) + }, + } + } +} + +pub(super) struct ElectrumRuntimeClient { electrum_client: Arc, bdk_electrum_client: Arc>, tx_sync: Arc>>, @@ -52,7 +128,7 @@ pub(crate) struct ElectrumRuntimeClient { } impl ElectrumRuntimeClient { - pub(crate) fn new( + pub(super) fn new( server_url: String, runtime: Arc, config: Arc, logger: Arc, ) -> Result { @@ -82,7 +158,7 @@ impl ElectrumRuntimeClient { Ok(Self { electrum_client, bdk_electrum_client, tx_sync, runtime, config, logger }) } - pub(crate) async fn sync_confirmables( + pub(super) async fn sync_confirmables( &self, confirmables: Vec>, ) -> Result<(), Error> { let now = Instant::now(); @@ -116,7 +192,7 @@ impl ElectrumRuntimeClient { Ok(res) } - pub(crate) async fn get_full_scan_wallet_update( + pub(super) async fn get_full_scan_wallet_update( &self, request: BdkFullScanRequest, cached_txs: impl IntoIterator>>, ) -> Result, Error> { @@ -150,7 +226,7 @@ impl ElectrumRuntimeClient { }) } - pub(crate) async fn get_incremental_sync_wallet_update( + pub(super) async fn 
get_incremental_sync_wallet_update( &self, request: BdkSyncRequest<(BdkKeyChainKind, u32)>, cached_txs: impl IntoIterator>>, ) -> Result { @@ -179,7 +255,7 @@ impl ElectrumRuntimeClient { }) } - pub(crate) async fn broadcast(&self, tx: Transaction) { + pub(super) async fn broadcast(&self, tx: Transaction) { let electrum_client = Arc::clone(&self.electrum_client); let txid = tx.compute_txid(); @@ -221,7 +297,7 @@ impl ElectrumRuntimeClient { } } - pub(crate) async fn get_fee_rate_cache_update( + pub(super) async fn get_fee_rate_cache_update( &self, ) -> Result, Error> { let electrum_client = Arc::clone(&self.electrum_client); diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 5674bad8b..911975173 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -9,10 +9,11 @@ mod bitcoind; mod electrum; mod esplora; +use electrum::{ElectrumRuntimeClient, ElectrumRuntimeStatus}; + use crate::chain::bitcoind::{ BitcoindClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, }; -use crate::chain::electrum::ElectrumRuntimeClient; use crate::chain::esplora::EsploraChainSource; use crate::config::{ BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, @@ -39,7 +40,7 @@ use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; use bdk_wallet::Update as BdkUpdate; -use bitcoin::{FeeRate, Network, Script, ScriptBuf, Txid}; +use bitcoin::{FeeRate, Network, Script, Txid}; use std::collections::HashMap; use std::sync::{Arc, Mutex, RwLock}; @@ -100,79 +101,58 @@ impl WalletSyncStatus { } } -pub(crate) enum ElectrumRuntimeStatus { - Started(Arc), - Stopped { - pending_registered_txs: Vec<(Txid, ScriptBuf)>, - pending_registered_outputs: Vec, - }, +pub(super) struct ElectrumChainSource { + server_url: String, + pub(super) sync_config: ElectrumSyncConfig, + electrum_runtime_status: RwLock, + onchain_wallet: Arc, + onchain_wallet_sync_status: Mutex, + lightning_wallet_sync_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + 
kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, } -impl ElectrumRuntimeStatus { - pub(crate) fn new() -> Self { - let pending_registered_txs = Vec::new(); - let pending_registered_outputs = Vec::new(); - Self::Stopped { pending_registered_txs, pending_registered_outputs } - } - - pub(crate) fn start( - &mut self, server_url: String, runtime: Arc, config: Arc, - logger: Arc, - ) -> Result<(), Error> { - match self { - Self::Stopped { pending_registered_txs, pending_registered_outputs } => { - let client = Arc::new(ElectrumRuntimeClient::new( - server_url.clone(), - runtime, - config, - logger, - )?); - - // Apply any pending `Filter` entries - for (txid, script_pubkey) in pending_registered_txs.drain(..) { - client.register_tx(&txid, &script_pubkey); - } - - for output in pending_registered_outputs.drain(..) { - client.register_output(output) - } - - *self = Self::Started(client); - }, - Self::Started(_) => { - debug_assert!(false, "We shouldn't call start if we're already started") - }, - } - Ok(()) - } - - pub(crate) fn stop(&mut self) { - *self = Self::new() - } - - pub(crate) fn client(&self) -> Option> { - match self { - Self::Started(client) => Some(Arc::clone(&client)), - Self::Stopped { .. 
} => None, +impl ElectrumChainSource { + pub(super) fn new( + server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, + ) -> Self { + let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new()); + let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self { + server_url, + sync_config, + electrum_runtime_status, + onchain_wallet, + onchain_wallet_sync_status, + lightning_wallet_sync_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, } } - fn register_tx(&mut self, txid: &Txid, script_pubkey: &Script) { - match self { - Self::Started(client) => client.register_tx(txid, script_pubkey), - Self::Stopped { pending_registered_txs, .. } => { - pending_registered_txs.push((*txid, script_pubkey.to_owned())) - }, - } + pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { + self.electrum_runtime_status.write().unwrap().start( + self.server_url.clone(), + Arc::clone(&runtime), + Arc::clone(&self.config), + Arc::clone(&self.logger), + ) } - fn register_output(&mut self, output: lightning::chain::WatchedOutput) { - match self { - Self::Started(client) => client.register_output(output), - Self::Stopped { pending_registered_outputs, .. 
} => { - pending_registered_outputs.push(output) - }, - } + pub(super) fn stop(&self) { + self.electrum_runtime_status.write().unwrap().stop(); } } @@ -183,20 +163,7 @@ pub(crate) struct ChainSource { enum ChainSourceKind { Esplora(EsploraChainSource), - Electrum { - server_url: String, - sync_config: ElectrumSyncConfig, - electrum_runtime_status: RwLock, - onchain_wallet: Arc, - onchain_wallet_sync_status: Mutex, - lightning_wallet_sync_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, - }, + Electrum(ElectrumChainSource), Bitcoind { api_client: Arc, header_cache: tokio::sync::Mutex, @@ -241,23 +208,18 @@ impl ChainSource { kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, ) -> Self { - let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new()); - let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let kind = ChainSourceKind::Electrum { + let electrum_chain_source = ElectrumChainSource::new( server_url, sync_config, - electrum_runtime_status, onchain_wallet, - onchain_wallet_sync_status, - lightning_wallet_sync_status, fee_estimator, tx_broadcaster, kv_store, config, - logger: Arc::clone(&logger), + Arc::clone(&logger), node_metrics, - }; + ); + let kind = ChainSourceKind::Electrum(electrum_chain_source); Self { kind, logger } } @@ -331,19 +293,8 @@ impl ChainSource { pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { match &self.kind { - ChainSourceKind::Electrum { - server_url, - electrum_runtime_status, - config, - logger, - .. - } => { - electrum_runtime_status.write().unwrap().start( - server_url.clone(), - Arc::clone(&runtime), - Arc::clone(&config), - Arc::clone(&logger), - )?; + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.start(runtime)? }, _ => { // Nothing to do for other chain sources. 
@@ -354,9 +305,7 @@ impl ChainSource { pub(crate) fn stop(&self) { match &self.kind { - ChainSourceKind::Electrum { electrum_runtime_status, .. } => { - electrum_runtime_status.write().unwrap().stop(); - }, + ChainSourceKind::Electrum(electrum_chain_source) => electrum_chain_source.stop(), _ => { // Nothing to do for other chain sources. }, @@ -406,15 +355,17 @@ impl ChainSource { return; } }, - ChainSourceKind::Electrum { sync_config, logger, .. } => { - if let Some(background_sync_config) = sync_config.background_sync_config.as_ref() { + ChainSourceKind::Electrum(electrum_chain_source) => { + if let Some(background_sync_config) = + electrum_chain_source.sync_config.background_sync_config.as_ref() + { self.start_tx_based_sync_loop( stop_sync_receiver, channel_manager, chain_monitor, output_sweeper, background_sync_config, - Arc::clone(&logger), + Arc::clone(&self.logger), ) .await } else { @@ -655,6 +606,90 @@ impl ChainSource { } } +impl ElectrumChainSource { + pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the onchain wallet" + ); + return Err(Error::FeerateEstimationUpdateFailed); + }; + let receiver_res = { + let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + // If this is our first sync, do a full scan with the configured gap limit. + // Otherwise just do an incremental sync. 
+ let incremental_sync = + self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + + let apply_wallet_update = + |update_res: Result, now: Instant| match update_res { + Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + self.logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => Err(e), + }; + + let cached_txs = self.onchain_wallet.get_cached_txs(); + + let res = if incremental_sync { + let incremental_sync_request = self.onchain_wallet.get_incremental_sync_request(); + let incremental_sync_fut = electrum_client + .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); + + let now = Instant::now(); + let update_res = incremental_sync_fut.await.map(|u| u.into()); + apply_wallet_update(update_res, now) + } else { + let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let full_scan_fut = + electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); + let now = Instant::now(); + let update_res = full_scan_fut.await.map(|u| u.into()); + apply_wallet_update(update_res, now) + }; + + self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } +} + impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) 
@@ -663,97 +698,8 @@ impl ChainSource { ChainSourceKind::Esplora(esplora_chain_source) => { esplora_chain_source.sync_onchain_wallet().await }, - ChainSourceKind::Electrum { - electrum_runtime_status, - onchain_wallet, - onchain_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. - } => { - let electrum_client: Arc = if let Some(client) = - electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the onchain wallet" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; - let receiver_res = { - let mut status_lock = onchain_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - // If this is our first sync, do a full scan with the configured gap limit. - // Otherwise just do an incremental sync. 
- let incremental_sync = - node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); - - let apply_wallet_update = - |update_res: Result, now: Instant| match update_res { - Ok(update) => match onchain_wallet.apply_update(update) { - Ok(()) => { - log_info!( - logger, - "{} of on-chain wallet finished in {}ms.", - if incremental_sync { "Incremental sync" } else { "Sync" }, - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - } - Ok(()) - }, - Err(e) => Err(e), - }, - Err(e) => Err(e), - }; - - let cached_txs = onchain_wallet.get_cached_txs(); - - let res = if incremental_sync { - let incremental_sync_request = onchain_wallet.get_incremental_sync_request(); - let incremental_sync_fut = electrum_client - .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); - - let now = Instant::now(); - let update_res = incremental_sync_fut.await.map(|u| u.into()); - apply_wallet_update(update_res, now) - } else { - let full_scan_request = onchain_wallet.get_full_scan_request(); - let full_scan_fut = - electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); - let now = Instant::now(); - let update_res = full_scan_fut.await.map(|u| u.into()); - apply_wallet_update(update_res, now) - }; - - onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.sync_onchain_wallet().await }, ChainSourceKind::Bitcoind { .. 
} => { // In BitcoindRpc mode we sync lightning and onchain wallet in one go via @@ -764,6 +710,74 @@ impl ChainSource { } } +impl ElectrumChainSource { + pub(crate) async fn sync_lightning_wallet( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the lightning wallet" + ); + return Err(Error::TxSyncFailed); + }; + + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + sync_cman as Arc, + sync_cmon as Arc, + sync_sweeper as Arc, + ]; + + let receiver_res = { + let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::TxSyncFailed + })?; + } + + let res = electrum_client.sync_confirmables(confirmables).await; + + if let Ok(_) = res { + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + 
Arc::clone(&self.node_metrics), + )?; + } + + self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } +} + impl ChainSource { // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) @@ -777,76 +791,10 @@ impl ChainSource { .sync_lightning_wallet(channel_manager, chain_monitor, output_sweeper) .await }, - ChainSourceKind::Electrum { - electrum_runtime_status, - lightning_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. - } => { - let electrum_client: Arc = if let Some(client) = - electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the lightning wallet" - ); - return Err(Error::TxSyncFailed); - }; - - let sync_cman = Arc::clone(&channel_manager); - let sync_cmon = Arc::clone(&chain_monitor); - let sync_sweeper = Arc::clone(&output_sweeper); - let confirmables = vec![ - sync_cman as Arc, - sync_cmon as Arc, - sync_sweeper as Arc, - ]; - - let receiver_res = { - let mut status_lock = lightning_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(logger, "Failed to receive wallet sync result: {:?}", e); - Error::TxSyncFailed - })?; - } - - let res = electrum_client.sync_confirmables(confirmables).await; - - if let Ok(_) = res { - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - 
)?; - } - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&kv_store), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )?; - } - - lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source + .sync_lightning_wallet(channel_manager, chain_monitor, output_sweeper) + .await }, ChainSourceKind::Bitcoind { .. } => { // In BitcoindRpc mode we sync lightning and onchain wallet in one go via @@ -1013,56 +961,52 @@ impl ChainSource { } } -impl ChainSource { +impl ElectrumChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { - match &self.kind { - ChainSourceKind::Esplora(esplora_chain_source) => { - esplora_chain_source.update_fee_rate_estimates().await - }, - ChainSourceKind::Electrum { - electrum_runtime_status, - fee_estimator, - kv_store, - logger, - node_metrics, - .. 
- } => { - let electrum_client: Arc = if let Some(client) = - electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before updating fees" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().unwrap().client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source before updating fees"); + return Err(Error::FeerateEstimationUpdateFailed); + }; - let now = Instant::now(); + let now = Instant::now(); - let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; - fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; + self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - log_info!( - logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); + log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - } + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } - Ok(()) + Ok(()) + } +} + +impl ChainSource { + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + 
match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.update_fee_rate_estimates().await + }, + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.update_fee_rate_estimates().await }, ChainSourceKind::Bitcoind { api_client, @@ -1197,31 +1141,33 @@ impl ChainSource { } } +impl ElectrumChainSource { + pub(crate) async fn process_broadcast_queue(&self) { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source before broadcasting"); + return; + }; + + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in next_package { + electrum_client.broadcast(tx).await; + } + } + } +} + impl ChainSource { pub(crate) async fn process_broadcast_queue(&self) { match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { esplora_chain_source.process_broadcast_queue().await }, - ChainSourceKind::Electrum { electrum_runtime_status, tx_broadcaster, .. } => { - let electrum_client: Arc = if let Some(client) = - electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before broadcasting" - ); - return; - }; - - let mut receiver = tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in next_package { - electrum_client.broadcast(tx).await; - } - } + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.process_broadcast_queue().await }, ChainSourceKind::Bitcoind { api_client, tx_broadcaster, logger, .. 
} => { // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 @@ -1281,25 +1227,34 @@ impl ChainSource { } } +impl Filter for ElectrumChainSource { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) + } + fn register_output(&self, output: WatchedOutput) { + self.electrum_runtime_status.write().unwrap().register_output(output) + } +} + impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { esplora_chain_source.register_tx(txid, script_pubkey) }, - ChainSourceKind::Electrum { electrum_runtime_status, .. } => { - electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.register_tx(txid, script_pubkey) }, ChainSourceKind::Bitcoind { .. } => (), } } - fn register_output(&self, output: lightning::chain::WatchedOutput) { + fn register_output(&self, output: WatchedOutput) { match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { esplora_chain_source.register_output(output) }, - ChainSourceKind::Electrum { electrum_runtime_status, .. } => { - electrum_runtime_status.write().unwrap().register_output(output) + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.register_output(output) }, ChainSourceKind::Bitcoind { .. 
} => (), } From 40d6440218bad490176e5f5685df4b4380d45589 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Jul 2025 09:44:33 +0200 Subject: [PATCH 032/184] Move `ElectrumChainSource` type to `chain::electrum` module --- src/chain/electrum.rs | 305 +++++++++++++++++++++++++++++++++++++++--- src/chain/mod.rs | 282 +------------------------------------- 2 files changed, 291 insertions(+), 296 deletions(-) diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 844e46187..44a637cc3 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -5,16 +5,21 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; + use crate::config::{ - Config, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, - LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, + Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, }; use crate::error::Error; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, - ConfirmationTarget, + ConfirmationTarget, OnchainFeeEstimator, }; +use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::NodeMetrics; use lightning::chain::{Confirm, Filter, WatchedOutput}; use lightning::util::ser::Writeable; @@ -25,6 +30,7 @@ use bdk_chain::bdk_core::spk_client::FullScanResponse as BdkFullScanResponse; use bdk_chain::bdk_core::spk_client::SyncRequest as BdkSyncRequest; use bdk_chain::bdk_core::spk_client::SyncResponse as BdkSyncResponse; use bdk_wallet::KeychainKind as BdkKeyChainKind; +use 
bdk_wallet::Update as BdkUpdate; use bdk_electrum::BdkElectrumClient; @@ -35,14 +41,279 @@ use electrum_client::{Batch, ElectrumApi}; use bitcoin::{FeeRate, Network, Script, ScriptBuf, Transaction, Txid}; use std::collections::HashMap; -use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5; const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3; const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10; -pub(super) enum ElectrumRuntimeStatus { +pub(super) struct ElectrumChainSource { + server_url: String, + pub(super) sync_config: ElectrumSyncConfig, + electrum_runtime_status: RwLock, + onchain_wallet: Arc, + onchain_wallet_sync_status: Mutex, + lightning_wallet_sync_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, +} + +impl ElectrumChainSource { + pub(super) fn new( + server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, + ) -> Self { + let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new()); + let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self { + server_url, + sync_config, + electrum_runtime_status, + onchain_wallet, + onchain_wallet_sync_status, + lightning_wallet_sync_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, + } + } + + pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { + self.electrum_runtime_status.write().unwrap().start( + self.server_url.clone(), + Arc::clone(&runtime), + Arc::clone(&self.config), + Arc::clone(&self.logger), + ) + } + + pub(super) fn stop(&self) { + self.electrum_runtime_status.write().unwrap().stop(); + } + + 
pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the onchain wallet" + ); + return Err(Error::FeerateEstimationUpdateFailed); + }; + let receiver_res = { + let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + // If this is our first sync, do a full scan with the configured gap limit. + // Otherwise just do an incremental sync. 
+ let incremental_sync = + self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + + let apply_wallet_update = + |update_res: Result, now: Instant| match update_res { + Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + self.logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => Err(e), + }; + + let cached_txs = self.onchain_wallet.get_cached_txs(); + + let res = if incremental_sync { + let incremental_sync_request = self.onchain_wallet.get_incremental_sync_request(); + let incremental_sync_fut = electrum_client + .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); + + let now = Instant::now(); + let update_res = incremental_sync_fut.await.map(|u| u.into()); + apply_wallet_update(update_res, now) + } else { + let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let full_scan_fut = + electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); + let now = Instant::now(); + let update_res = full_scan_fut.await.map(|u| u.into()); + apply_wallet_update(update_res, now) + }; + + self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + pub(crate) async fn sync_lightning_wallet( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + 
Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the lightning wallet" + ); + return Err(Error::TxSyncFailed); + }; + + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + sync_cman as Arc, + sync_cmon as Arc, + sync_sweeper as Arc, + ]; + + let receiver_res = { + let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::TxSyncFailed + })?; + } + + let res = electrum_client.sync_confirmables(confirmables).await; + + if let Ok(_) = res { + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; + } + + self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().unwrap().client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source 
before updating fees"); + return Err(Error::FeerateEstimationUpdateFailed); + }; + + let now = Instant::now(); + + let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; + self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + + log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + Ok(()) + } + + pub(crate) async fn process_broadcast_queue(&self) { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source before broadcasting"); + return; + }; + + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in next_package { + electrum_client.broadcast(tx).await; + } + } + } +} + +impl Filter for ElectrumChainSource { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) + } + fn register_output(&self, output: lightning::chain::WatchedOutput) { + self.electrum_runtime_status.write().unwrap().register_output(output) + } +} + +enum ElectrumRuntimeStatus { Started(Arc), Stopped { pending_registered_txs: Vec<(Txid, ScriptBuf)>, @@ -51,7 +322,7 @@ pub(super) enum ElectrumRuntimeStatus { } impl ElectrumRuntimeStatus { - pub(super) fn new() -> Self { + fn new() -> Self { let pending_registered_txs = Vec::new(); let pending_registered_outputs = Vec::new(); Self::Stopped { pending_registered_txs, 
pending_registered_outputs } @@ -92,14 +363,14 @@ impl ElectrumRuntimeStatus { *self = Self::new() } - pub(super) fn client(&self) -> Option> { + fn client(&self) -> Option> { match self { Self::Started(client) => Some(Arc::clone(&client)), Self::Stopped { .. } => None, } } - pub(super) fn register_tx(&mut self, txid: &Txid, script_pubkey: &Script) { + fn register_tx(&mut self, txid: &Txid, script_pubkey: &Script) { match self { Self::Started(client) => client.register_tx(txid, script_pubkey), Self::Stopped { pending_registered_txs, .. } => { @@ -108,7 +379,7 @@ impl ElectrumRuntimeStatus { } } - pub(super) fn register_output(&mut self, output: WatchedOutput) { + fn register_output(&mut self, output: lightning::chain::WatchedOutput) { match self { Self::Started(client) => client.register_output(output), Self::Stopped { pending_registered_outputs, .. } => { @@ -118,7 +389,7 @@ impl ElectrumRuntimeStatus { } } -pub(super) struct ElectrumRuntimeClient { +struct ElectrumRuntimeClient { electrum_client: Arc, bdk_electrum_client: Arc>, tx_sync: Arc>>, @@ -128,7 +399,7 @@ pub(super) struct ElectrumRuntimeClient { } impl ElectrumRuntimeClient { - pub(super) fn new( + fn new( server_url: String, runtime: Arc, config: Arc, logger: Arc, ) -> Result { @@ -158,7 +429,7 @@ impl ElectrumRuntimeClient { Ok(Self { electrum_client, bdk_electrum_client, tx_sync, runtime, config, logger }) } - pub(super) async fn sync_confirmables( + async fn sync_confirmables( &self, confirmables: Vec>, ) -> Result<(), Error> { let now = Instant::now(); @@ -192,7 +463,7 @@ impl ElectrumRuntimeClient { Ok(res) } - pub(super) async fn get_full_scan_wallet_update( + async fn get_full_scan_wallet_update( &self, request: BdkFullScanRequest, cached_txs: impl IntoIterator>>, ) -> Result, Error> { @@ -226,7 +497,7 @@ impl ElectrumRuntimeClient { }) } - pub(super) async fn get_incremental_sync_wallet_update( + async fn get_incremental_sync_wallet_update( &self, request: BdkSyncRequest<(BdkKeyChainKind, u32)>, 
cached_txs: impl IntoIterator>>, ) -> Result { @@ -255,7 +526,7 @@ impl ElectrumRuntimeClient { }) } - pub(super) async fn broadcast(&self, tx: Transaction) { + async fn broadcast(&self, tx: Transaction) { let electrum_client = Arc::clone(&self.electrum_client); let txid = tx.compute_txid(); @@ -297,7 +568,7 @@ impl ElectrumRuntimeClient { } } - pub(super) async fn get_fee_rate_cache_update( + async fn get_fee_rate_cache_update( &self, ) -> Result, Error> { let electrum_client = Arc::clone(&self.electrum_client); diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 911975173..045310198 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -9,11 +9,10 @@ mod bitcoind; mod electrum; mod esplora; -use electrum::{ElectrumRuntimeClient, ElectrumRuntimeStatus}; - use crate::chain::bitcoind::{ BitcoindClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, }; +use crate::chain::electrum::ElectrumChainSource; use crate::chain::esplora::EsploraChainSource; use crate::config::{ BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, @@ -30,7 +29,7 @@ use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, use crate::{Error, NodeMetrics}; use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::{Confirm, Filter, Listen, WatchedOutput}; +use lightning::chain::{Filter, Listen}; use lightning::util::ser::Writeable; use lightning_block_sync::gossip::UtxoSource; @@ -38,8 +37,6 @@ use lightning_block_sync::init::{synchronize_listeners, validate_best_block_head use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; -use bdk_wallet::Update as BdkUpdate; - use bitcoin::{FeeRate, Network, Script, Txid}; use std::collections::HashMap; @@ -101,61 +98,6 @@ impl WalletSyncStatus { } } -pub(super) struct ElectrumChainSource { - server_url: String, - pub(super) sync_config: 
ElectrumSyncConfig, - electrum_runtime_status: RwLock, - onchain_wallet: Arc, - onchain_wallet_sync_status: Mutex, - lightning_wallet_sync_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, -} - -impl ElectrumChainSource { - pub(super) fn new( - server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, - fee_estimator: Arc, tx_broadcaster: Arc, - kv_store: Arc, config: Arc, logger: Arc, - node_metrics: Arc>, - ) -> Self { - let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new()); - let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self { - server_url, - sync_config, - electrum_runtime_status, - onchain_wallet, - onchain_wallet_sync_status, - lightning_wallet_sync_status, - fee_estimator, - tx_broadcaster, - kv_store, - config, - logger: Arc::clone(&logger), - node_metrics, - } - } - - pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { - self.electrum_runtime_status.write().unwrap().start( - self.server_url.clone(), - Arc::clone(&runtime), - Arc::clone(&self.config), - Arc::clone(&self.logger), - ) - } - - pub(super) fn stop(&self) { - self.electrum_runtime_status.write().unwrap().stop(); - } -} - pub(crate) struct ChainSource { kind: ChainSourceKind, logger: Arc, @@ -606,90 +548,6 @@ impl ChainSource { } } -impl ElectrumChainSource { - pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the onchain wallet" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; - let receiver_res = { - let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); - 
status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - // If this is our first sync, do a full scan with the configured gap limit. - // Otherwise just do an incremental sync. - let incremental_sync = - self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); - - let apply_wallet_update = - |update_res: Result, now: Instant| match update_res { - Ok(update) => match self.onchain_wallet.apply_update(update) { - Ok(()) => { - log_info!( - self.logger, - "{} of on-chain wallet finished in {}ms.", - if incremental_sync { "Incremental sync" } else { "Sync" }, - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; - } - Ok(()) - }, - Err(e) => Err(e), - }, - Err(e) => Err(e), - }; - - let cached_txs = self.onchain_wallet.get_cached_txs(); - - let res = if incremental_sync { - let incremental_sync_request = self.onchain_wallet.get_incremental_sync_request(); - let incremental_sync_fut = electrum_client - .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); - - let now = Instant::now(); - let update_res = incremental_sync_fut.await.map(|u| u.into()); - apply_wallet_update(update_res, now) - } else { - let full_scan_request = self.onchain_wallet.get_full_scan_request(); - let full_scan_fut = - 
electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); - let now = Instant::now(); - let update_res = full_scan_fut.await.map(|u| u.into()); - apply_wallet_update(update_res, now) - }; - - self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res - } -} - impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) @@ -710,74 +568,6 @@ impl ChainSource { } } -impl ElectrumChainSource { - pub(crate) async fn sync_lightning_wallet( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, - ) -> Result<(), Error> { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the lightning wallet" - ); - return Err(Error::TxSyncFailed); - }; - - let sync_cman = Arc::clone(&channel_manager); - let sync_cmon = Arc::clone(&chain_monitor); - let sync_sweeper = Arc::clone(&output_sweeper); - let confirmables = vec![ - sync_cman as Arc, - sync_cmon as Arc, - sync_sweeper as Arc, - ]; - - let receiver_res = { - let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); - Error::TxSyncFailed - })?; - } - - let res = electrum_client.sync_confirmables(confirmables).await; - - if let Ok(_) = res { - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - 
locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; - } - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), - )?; - } - - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res - } -} - impl ChainSource { // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) @@ -961,44 +751,6 @@ impl ChainSource { } } -impl ElectrumChainSource { - pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { - let electrum_client: Arc = if let Some(client) = - self.electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!(false, "We should have started the chain source before updating fees"); - return Err(Error::FeerateEstimationUpdateFailed); - }; - - let now = Instant::now(); - - let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; - self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - - log_info!( - self.logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; - } - - Ok(()) - } -} - impl ChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { match &self.kind { @@ -1141,25 +893,6 @@ impl ChainSource { } } -impl ElectrumChainSource { - pub(crate) async fn 
process_broadcast_queue(&self) { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!(false, "We should have started the chain source before broadcasting"); - return; - }; - - let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in next_package { - electrum_client.broadcast(tx).await; - } - } - } -} - impl ChainSource { pub(crate) async fn process_broadcast_queue(&self) { match &self.kind { @@ -1227,15 +960,6 @@ impl ChainSource { } } -impl Filter for ElectrumChainSource { - fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - self.electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) - } - fn register_output(&self, output: WatchedOutput) { - self.electrum_runtime_status.write().unwrap().register_output(output) - } -} - impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { match &self.kind { @@ -1248,7 +972,7 @@ impl Filter for ChainSource { ChainSourceKind::Bitcoind { .. } => (), } } - fn register_output(&self, output: WatchedOutput) { + fn register_output(&self, output: lightning::chain::WatchedOutput) { match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { esplora_chain_source.register_output(output) From 4541a0e45d1f662e524b80995c46d543801961a6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 24 Jul 2025 16:29:36 +0200 Subject: [PATCH 033/184] Move Bitcoind sync logic to a `BitcoindChainSource` type We refactor our `ChainSource` logic and move out the Bitcoind code into a new object. 
--- src/chain/bitcoind.rs | 9 +- src/chain/mod.rs | 1081 +++++++++++++++++++++-------------------- 2 files changed, 567 insertions(+), 523 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 98e77cac7..52dad7741 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -7,10 +7,8 @@ use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; -use base64::prelude::BASE64_STANDARD; -use base64::Engine; -use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; use lightning::chain::Listen; + use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::http::{HttpEndpoint, JsonResponse}; use lightning_block_sync::poll::ValidatedBlockHeader; @@ -19,9 +17,12 @@ use lightning_block_sync::rpc::{RpcClient, RpcError}; use lightning_block_sync::{ AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, }; - use serde::Serialize; +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; + use std::collections::{HashMap, VecDeque}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 045310198..338fd0d30 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -43,8 +43,6 @@ use std::collections::HashMap; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; - pub(crate) enum WalletSyncStatus { Completed, InProgress { subscribers: tokio::sync::broadcast::Sender> }, @@ -98,6 +96,250 @@ impl WalletSyncStatus { } } +const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; + +pub(super) struct BitcoindChainSource { + api_client: Arc, + header_cache: tokio::sync::Mutex, + latest_chain_tip: RwLock>, + onchain_wallet: Arc, + wallet_polling_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, +} + +impl BitcoindChainSource { + pub(crate) fn new_rpc( + 
rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + let api_client = Arc::new(BitcoindClient::new_rpc( + rpc_host.clone(), + rpc_port.clone(), + rpc_user.clone(), + rpc_password.clone(), + )); + + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + Self { + api_client, + header_cache, + latest_chain_tip, + onchain_wallet, + wallet_polling_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, + } + } + + pub(crate) fn new_rest( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + rest_client_config: BitcoindRestClientConfig, logger: Arc, + node_metrics: Arc>, + ) -> Self { + let api_client = Arc::new(BitcoindClient::new_rest( + rest_client_config.rest_host, + rest_client_config.rest_port, + rpc_host, + rpc_port, + rpc_user, + rpc_password, + )); + + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + + Self { + api_client, + header_cache, + latest_chain_tip, + wallet_polling_status, + onchain_wallet, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, + } + } + + pub(super) fn as_utxo_source(&self) -> Arc { + self.api_client.utxo_source() + } + + pub(super) async fn continuously_sync_wallets( + &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, + channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) { + // First register for the wallet polling status to make sure `Node::sync_wallets` 
calls + // wait on the result before proceeding. + { + let mut status_lock = self.wallet_polling_status.lock().unwrap(); + if status_lock.register_or_subscribe_pending_sync().is_some() { + debug_assert!(false, "Sync already in progress. This should never happen."); + } + } + + log_info!( + self.logger, + "Starting initial synchronization of chain listeners. This might take a while..", + ); + + let mut backoff = CHAIN_POLLING_INTERVAL_SECS; + const MAX_BACKOFF_SECS: u64 = 300; + + loop { + let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; + let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; + let onchain_wallet_best_block_hash = + self.onchain_wallet.current_best_block().block_hash; + + let mut chain_listeners = vec![ + ( + onchain_wallet_best_block_hash, + &*self.onchain_wallet as &(dyn Listen + Send + Sync), + ), + (channel_manager_best_block_hash, &*channel_manager as &(dyn Listen + Send + Sync)), + (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), + ]; + + // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s + // before giving them to `ChainMonitor` it the first place. However, this isn't + // trivial as we load them on initialization (in the `Builder`) and only gain + // network access during `start`. For now, we just make sure we get the worst known + // block hash and sychronize them via `ChainMonitor`. 
+ if let Some(worst_channel_monitor_block_hash) = chain_monitor + .list_monitors() + .iter() + .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) + .map(|m| m.current_best_block()) + .min_by_key(|b| b.height) + .map(|b| b.block_hash) + { + chain_listeners.push(( + worst_channel_monitor_block_hash, + &*chain_monitor as &(dyn Listen + Send + Sync), + )); + } + + let mut locked_header_cache = self.header_cache.lock().await; + let now = SystemTime::now(); + match synchronize_listeners( + self.api_client.as_ref(), + self.config.network, + &mut *locked_header_cache, + chain_listeners.clone(), + ) + .await + { + Ok(chain_tip) => { + { + log_info!( + self.logger, + "Finished synchronizing listeners in {}ms", + now.elapsed().unwrap().as_millis() + ); + *self.latest_chain_tip.write().unwrap() = Some(chain_tip); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + ) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); + } + break; + }, + + Err(e) => { + log_error!(self.logger, "Failed to synchronize chain listeners: {:?}", e); + if e.kind() == BlockSourceErrorKind::Transient { + log_info!( + self.logger, + "Transient error syncing chain listeners: {:?}. Retrying in {} seconds.", + e, + backoff + ); + tokio::time::sleep(Duration::from_secs(backoff)).await; + backoff = std::cmp::min(backoff * 2, MAX_BACKOFF_SECS); + } else { + log_error!( + self.logger, + "Persistent error syncing chain listeners: {:?}. 
Retrying in {} seconds.", + e, + MAX_BACKOFF_SECS + ); + tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)).await; + } + }, + } + } + + // Now propagate the initial result to unblock waiting subscribers. + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); + + let mut chain_polling_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + chain_polling_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let mut fee_rate_update_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + // When starting up, we just blocked on updating, so skip the first tick. + fee_rate_update_interval.reset(); + fee_rate_update_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + log_info!(self.logger, "Starting continuous polling for chain updates."); + + // Start the polling loop. + loop { + tokio::select! { + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping polling for new chain data.", + ); + return; + } + _ = chain_polling_interval.tick() => { + let _ = self.poll_and_update_listeners( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&output_sweeper) + ).await; + } + _ = fee_rate_update_interval.tick() => { + let _ = self.update_fee_rate_estimates().await; + } + } + } + } +} + pub(crate) struct ChainSource { kind: ChainSourceKind, logger: Arc, @@ -106,19 +348,7 @@ pub(crate) struct ChainSource { enum ChainSourceKind { Esplora(EsploraChainSource), Electrum(ElectrumChainSource), - Bitcoind { - api_client: Arc, - header_cache: tokio::sync::Mutex, - latest_chain_tip: RwLock>, - onchain_wallet: Arc, - wallet_polling_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, - }, + Bitcoind(BitcoindChainSource), } impl ChainSource { @@ -171,29 +401,20 @@ impl ChainSource { tx_broadcaster: Arc, kv_store: Arc, 
config: Arc, logger: Arc, node_metrics: Arc>, ) -> Self { - let api_client = Arc::new(BitcoindClient::new_rpc( - rpc_host.clone(), - rpc_port.clone(), - rpc_user.clone(), - rpc_password.clone(), - )); - - let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); - let latest_chain_tip = RwLock::new(None); - let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - let kind = ChainSourceKind::Bitcoind { - api_client, - header_cache, - latest_chain_tip, + let bitcoind_chain_source = BitcoindChainSource::new_rpc( + rpc_host, + rpc_port, + rpc_user, + rpc_password, onchain_wallet, - wallet_polling_status, fee_estimator, tx_broadcaster, kv_store, config, - logger: Arc::clone(&logger), + Arc::clone(&logger), node_metrics, - }; + ); + let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); Self { kind, logger } } @@ -204,32 +425,21 @@ impl ChainSource { rest_client_config: BitcoindRestClientConfig, logger: Arc, node_metrics: Arc>, ) -> Self { - let api_client = Arc::new(BitcoindClient::new_rest( - rest_client_config.rest_host, - rest_client_config.rest_port, + let bitcoind_chain_source = BitcoindChainSource::new_rest( rpc_host, rpc_port, rpc_user, rpc_password, - )); - - let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); - let latest_chain_tip = RwLock::new(None); - let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - - let kind = ChainSourceKind::Bitcoind { - api_client, - header_cache, - latest_chain_tip, - wallet_polling_status, onchain_wallet, fee_estimator, tx_broadcaster, kv_store, config, - logger: Arc::clone(&logger), + rest_client_config, + Arc::clone(&logger), node_metrics, - }; + ); + let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); Self { kind, logger } } @@ -256,7 +466,9 @@ impl ChainSource { pub(crate) fn as_utxo_source(&self) -> Option> { match &self.kind { - ChainSourceKind::Bitcoind { api_client, .. 
} => Some(api_client.utxo_source()), + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + Some(bitcoind_chain_source.as_utxo_source()) + }, _ => None, } } @@ -270,7 +482,7 @@ impl ChainSource { } pub(crate) async fn continuously_sync_wallets( - &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, + &self, stop_sync_receiver: tokio::sync::watch::Receiver<()>, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) { @@ -319,171 +531,15 @@ impl ChainSource { return; } }, - ChainSourceKind::Bitcoind { - api_client, - header_cache, - latest_chain_tip, - onchain_wallet, - wallet_polling_status, - kv_store, - config, - logger, - node_metrics, - .. - } => { - // First register for the wallet polling status to make sure `Node::sync_wallets` calls - // wait on the result before proceeding. - { - let mut status_lock = wallet_polling_status.lock().unwrap(); - if status_lock.register_or_subscribe_pending_sync().is_some() { - debug_assert!(false, "Sync already in progress. This should never happen."); - } - } - - log_info!( - logger, - "Starting initial synchronization of chain listeners. This might take a while..", - ); - - let mut backoff = CHAIN_POLLING_INTERVAL_SECS; - const MAX_BACKOFF_SECS: u64 = 300; - - loop { - let channel_manager_best_block_hash = - channel_manager.current_best_block().block_hash; - let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; - let onchain_wallet_best_block_hash = - onchain_wallet.current_best_block().block_hash; - - let mut chain_listeners = vec![ - ( - onchain_wallet_best_block_hash, - &**onchain_wallet as &(dyn Listen + Send + Sync), - ), - ( - channel_manager_best_block_hash, - &*channel_manager as &(dyn Listen + Send + Sync), - ), - (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), - ]; - - // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s - // before giving them to `ChainMonitor` it the first place. 
However, this isn't - // trivial as we load them on initialization (in the `Builder`) and only gain - // network access during `start`. For now, we just make sure we get the worst known - // block hash and sychronize them via `ChainMonitor`. - if let Some(worst_channel_monitor_block_hash) = chain_monitor - .list_monitors() - .iter() - .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) - .map(|m| m.current_best_block()) - .min_by_key(|b| b.height) - .map(|b| b.block_hash) - { - chain_listeners.push(( - worst_channel_monitor_block_hash, - &*chain_monitor as &(dyn Listen + Send + Sync), - )); - } - - let mut locked_header_cache = header_cache.lock().await; - let now = SystemTime::now(); - match synchronize_listeners( - api_client.as_ref(), - config.network, - &mut *locked_header_cache, - chain_listeners.clone(), + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source + .continuously_sync_wallets( + stop_sync_receiver, + channel_manager, + chain_monitor, + output_sweeper, ) .await - { - Ok(chain_tip) => { - { - log_info!( - logger, - "Finished synchronizing listeners in {}ms", - now.elapsed().unwrap().as_millis() - ); - *latest_chain_tip.write().unwrap() = Some(chain_tip); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .unwrap_or_else(|e| { - log_error!(logger, "Failed to persist node metrics: {}", e); - }); - } - break; - }, - - Err(e) => { - log_error!(logger, "Failed to synchronize chain listeners: {:?}", e); - if e.kind() == BlockSourceErrorKind::Transient { - log_info!( - logger, - "Transient error syncing chain listeners: {:?}. 
Retrying in {} seconds.", - e, - backoff - ); - tokio::time::sleep(Duration::from_secs(backoff)).await; - backoff = std::cmp::min(backoff * 2, MAX_BACKOFF_SECS); - } else { - log_error!( - logger, - "Persistent error syncing chain listeners: {:?}. Retrying in {} seconds.", - e, - MAX_BACKOFF_SECS - ); - tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)).await; - } - }, - } - } - - // Now propagate the initial result to unblock waiting subscribers. - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); - - let mut chain_polling_interval = - tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); - chain_polling_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - let mut fee_rate_update_interval = - tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); - // When starting up, we just blocked on updating, so skip the first tick. - fee_rate_update_interval.reset(); - fee_rate_update_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - log_info!(logger, "Starting continuous polling for chain updates."); - - // Start the polling loop. - loop { - tokio::select! 
{ - _ = stop_sync_receiver.changed() => { - log_trace!( - logger, - "Stopping polling for new chain data.", - ); - return; - } - _ = chain_polling_interval.tick() => { - let _ = self.poll_and_update_listeners(Arc::clone(&channel_manager), Arc::clone(&chain_monitor), Arc::clone(&output_sweeper)).await; - } - _ = fee_rate_update_interval.tick() => { - let _ = self.update_fee_rate_estimates().await; - } - } - } }, } } @@ -595,6 +651,128 @@ impl ChainSource { } } +impl BitcoindChainSource { + pub(super) async fn poll_and_update_listeners( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.wallet_polling_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet polling result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); + let chain_tip = if let Some(tip) = latest_chain_tip_opt { + tip + } else { + match validate_best_block_header(self.api_client.as_ref()).await { + Ok(tip) => { + *self.latest_chain_tip.write().unwrap() = Some(tip); + tip + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + }; + + let mut locked_header_cache = self.header_cache.lock().await; + let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); + let chain_listener = ChainListener { + onchain_wallet: Arc::clone(&self.onchain_wallet), + channel_manager: Arc::clone(&channel_manager), + chain_monitor, + 
output_sweeper, + }; + let mut spv_client = + SpvClient::new(chain_tip, chain_poller, &mut *locked_header_cache, &chain_listener); + + let now = SystemTime::now(); + match spv_client.poll_best_tip().await { + Ok((ChainTip::Better(tip), true)) => { + log_trace!( + self.logger, + "Finished polling best tip in {}ms", + now.elapsed().unwrap().as_millis() + ); + *self.latest_chain_tip.write().unwrap() = Some(tip); + }, + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let cur_height = channel_manager.current_best_block().height; + + let now = SystemTime::now(); + let unconfirmed_txids = self.onchain_wallet.get_unconfirmed_txids(); + match self.api_client.get_updated_mempool_transactions(cur_height, unconfirmed_txids).await + { + Ok((unconfirmed_txs, evicted_txids)) => { + log_trace!( + self.logger, + "Finished polling mempool of size {} and {} evicted transactions in {}ms", + unconfirmed_txs.len(), + evicted_txids.len(), + now.elapsed().unwrap().as_millis() + ); + self.onchain_wallet + .apply_mempool_txs(unconfirmed_txs, evicted_txids) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to apply mempool transactions: {:?}", e); + }); + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for mempool transactions: {:?}", e); + let res = Err(Error::TxSyncFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + + let write_res = write_node_metrics( + &*locked_node_metrics, + 
Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + ); + match write_res { + Ok(()) => (), + Err(e) => { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + let res = Err(Error::PersistenceFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let res = Ok(()); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + res + } +} + impl ChainSource { pub(crate) async fn poll_and_update_listeners( &self, channel_manager: Arc, chain_monitor: Arc, @@ -611,143 +789,132 @@ impl ChainSource { // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here. unreachable!("Listeners will be synced via transction-based syncing") }, - ChainSourceKind::Bitcoind { - api_client, - header_cache, - latest_chain_tip, - onchain_wallet, - wallet_polling_status, - kv_store, - config, - logger, - node_metrics, - .. - } => { - let receiver_res = { - let mut status_lock = wallet_polling_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); - log_error!(logger, "Failed to receive wallet polling result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - let latest_chain_tip_opt = latest_chain_tip.read().unwrap().clone(); - let chain_tip = if let Some(tip) = latest_chain_tip_opt { - tip - } else { - match validate_best_block_header(api_client.as_ref()).await { - Ok(tip) => { - *latest_chain_tip.write().unwrap() = Some(tip); - tip - }, - Err(e) => { - log_error!(logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - wallet_polling_status - .lock() - .unwrap() - .propagate_result_to_subscribers(res); - return res; - }, - } - }; - - let mut locked_header_cache = 
header_cache.lock().await; - let chain_poller = ChainPoller::new(Arc::clone(&api_client), config.network); - let chain_listener = ChainListener { - onchain_wallet: Arc::clone(&onchain_wallet), - channel_manager: Arc::clone(&channel_manager), - chain_monitor, - output_sweeper, - }; - let mut spv_client = SpvClient::new( - chain_tip, - chain_poller, - &mut *locked_header_cache, - &chain_listener, - ); + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source + .poll_and_update_listeners(channel_manager, chain_monitor, output_sweeper) + .await + }, + } + } +} - let now = SystemTime::now(); - match spv_client.poll_best_tip().await { - Ok((ChainTip::Better(tip), true)) => { - log_trace!( - logger, - "Finished polling best tip in {}ms", - now.elapsed().unwrap().as_millis() - ); - *latest_chain_tip.write().unwrap() = Some(tip); - }, - Ok(_) => {}, - Err(e) => { - log_error!(logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } +impl BitcoindChainSource { + pub(super) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + macro_rules! 
get_fee_rate_update { + ($estimation_fut: expr) => {{ + let update_res = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + $estimation_fut, + ) + .await + .map_err(|e| { + log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })?; + update_res + }}; + } + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + let now = Instant::now(); + for target in confirmation_targets { + let fee_rate_update_res = match target { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, + ) => { + let estimation_fut = self.api_client.get_mempool_minimum_fee_rate(); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning(LdkConfirmationTarget::MaximumFeeEstimate) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning(LdkConfirmationTarget::UrgentOnChainSweep) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + _ => { + // Otherwise, we default to economical block-target estimate. + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Economical; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + }; + + let fee_rate = match (fee_rate_update_res, self.config.network) { + (Ok(rate), _) => rate, + (Err(e), Network::Bitcoin) => { + // Strictly fail on mainnet. 
+ log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); + return Err(Error::FeerateEstimationUpdateFailed); + }, + (Err(e), n) if n == Network::Regtest || n == Network::Signet => { + // On regtest/signet we just fall back to the usual 1 sat/vb == 250 + // sat/kwu default. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", + e, + ); + FeeRate::from_sat_per_kwu(250) + }, + (Err(e), _) => { + // On testnet `estimatesmartfee` can be unreliable so we just skip in + // case of a failure, which will have us falling back to defaults. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: {}. Falling back to defaults.", + e, + ); + return Ok(()); + }, + }; - let cur_height = channel_manager.current_best_block().height; + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - let now = SystemTime::now(); - let unconfirmed_txids = onchain_wallet.get_unconfirmed_txids(); - match api_client - .get_updated_mempool_transactions(cur_height, unconfirmed_txids) - .await - { - Ok((unconfirmed_txs, evicted_txids)) => { - log_trace!( - logger, - "Finished polling mempool of size {} and {} evicted transactions in {}ms", - unconfirmed_txs.len(), - evicted_txids.len(), - now.elapsed().unwrap().as_millis() - ); - onchain_wallet - .apply_mempool_txs(unconfirmed_txs, evicted_txids) - .unwrap_or_else(|e| { - log_error!(logger, "Failed to apply mempool transactions: {:?}", e); - }); - }, - Err(e) => { - log_error!(logger, "Failed to poll for mempool transactions: {:?}", e); - let res = Err(Error::TxSyncFailed); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } + new_fee_rate_cache.insert(target, adjusted_fee_rate); - let unix_time_secs_opt = - 
SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + log_trace!( + self.logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } - let write_res = write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - ); - match write_res { - Ok(()) => (), - Err(e) => { - log_error!(logger, "Failed to persist node metrics: {}", e); - let res = Err(Error::PersistenceFailed); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } + if self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { + // We only log if the values changed, as it might be very spammy otherwise. + log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + } - let res = Ok(()); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - res - }, + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; } + + Ok(()) } } @@ -760,135 +927,62 @@ impl ChainSource { ChainSourceKind::Electrum(electrum_chain_source) => { electrum_chain_source.update_fee_rate_estimates().await }, - ChainSourceKind::Bitcoind { - api_client, - fee_estimator, - config, - kv_store, - logger, - node_metrics, - .. - } => { - macro_rules! 
get_fee_rate_update { - ($estimation_fut: expr) => {{ - let update_res = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - $estimation_fut, - ) - .await - .map_err(|e| { - log_error!(logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })?; - update_res - }}; - } - let confirmation_targets = get_all_conf_targets(); - - let mut new_fee_rate_cache = HashMap::with_capacity(10); - let now = Instant::now(); - for target in confirmation_targets { - let fee_rate_update_res = match target { - ConfirmationTarget::Lightning( - LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, - ) => { - let estimation_fut = api_client.get_mempool_minimum_fee_rate(); - get_fee_rate_update!(estimation_fut) - }, - ConfirmationTarget::Lightning( - LdkConfirmationTarget::MaximumFeeEstimate, - ) => { - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = - api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - ConfirmationTarget::Lightning( - LdkConfirmationTarget::UrgentOnChainSweep, - ) => { - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = - api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - _ => { - // Otherwise, we default to economical block-target estimate. - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Economical; - let estimation_fut = - api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - }; - - let fee_rate = match (fee_rate_update_res, config.network) { - (Ok(rate), _) => rate, - (Err(e), Network::Bitcoin) => { - // Strictly fail on mainnet. 
- log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); - return Err(Error::FeerateEstimationUpdateFailed); + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source.update_fee_rate_estimates().await + }, + } + } +} + +impl BitcoindChainSource { + pub(crate) async fn process_broadcast_queue(&self) { + // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 + // features, we should eventually switch to use `submitpackage` via the + // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual + // transactions. + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in &next_package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.api_client.broadcast_transaction(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(id) => { + debug_assert_eq!(id, txid); + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); }, - (Err(e), n) if n == Network::Regtest || n == Network::Signet => { - // On regtest/signet we just fall back to the usual 1 sat/vb == 250 - // sat/kwu default. + Err(e) => { log_error!( - logger, - "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", - e, + self.logger, + "Failed to broadcast transaction {}: {}", + txid, + e ); - FeeRate::from_sat_per_kwu(250) - }, - (Err(e), _) => { - // On testnet `estimatesmartfee` can be unreliable so we just skip in - // case of a failure, which will have us falling back to defaults. - log_error!( - logger, - "Failed to retrieve fee rate estimates: {}. 
Falling back to defaults.", - e, + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) ); - return Ok(()); }, - }; - - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. - let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - - new_fee_rate_cache.insert(target, adjusted_fee_rate); - - log_trace!( - logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.to_sat_per_kwu(), - ); - } - - if fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { - // We only log if the values changed, as it might be very spammy otherwise. - log_info!( - logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - } - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, } - - Ok(()) - }, + } } } } @@ -902,59 +996,8 @@ impl ChainSource { ChainSourceKind::Electrum(electrum_chain_source) => { electrum_chain_source.process_broadcast_queue().await }, - ChainSourceKind::Bitcoind { api_client, tx_broadcaster, logger, .. } => { - // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 - // features, we should eventually switch to use `submitpackage` via the - // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual - // transactions. 
- let mut receiver = tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - api_client.broadcast_transaction(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(id) => { - debug_assert_eq!(id, txid); - log_trace!( - logger, - "Successfully broadcast transaction {}", - txid - ); - }, - Err(e) => { - log_error!( - logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, - Err(e) => { - log_error!( - logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - } - } - } + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source.process_broadcast_queue().await }, } } From a7e54d84ca596249bc31c6106e206e2a85b215be Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Jul 2025 10:04:05 +0200 Subject: [PATCH 034/184] Move `BitcoindChainSource` type to `chain::bitcoind` module --- src/chain/bitcoind.rs | 559 ++++++++++++++++++++++++++++++++++++++++- src/chain/mod.rs | 566 +----------------------------------------- 2 files changed, 563 insertions(+), 562 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 52dad7741..b87ee13ed 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -5,27 +5,578 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; +use super::WalletSyncStatus; +use crate::config::{ + BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, +}; +use crate::fee_estimator::{ + apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, + ConfirmationTarget, OnchainFeeEstimator, +}; +use crate::io::utils::write_node_metrics; +use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::{Error, NodeMetrics}; + +use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; use lightning::chain::Listen; +use lightning::util::ser::Writeable; use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::http::{HttpEndpoint, JsonResponse}; -use lightning_block_sync::poll::ValidatedBlockHeader; +use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; +use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; use lightning_block_sync::rest::RestClient; use lightning_block_sync::rpc::{RpcClient, RpcError}; use lightning_block_sync::{ AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, }; +use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; + use serde::Serialize; use base64::prelude::BASE64_STANDARD; use base64::Engine; -use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; +use bitcoin::{BlockHash, FeeRate, Network, Transaction, Txid}; use std::collections::{HashMap, VecDeque}; use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; + +pub(super) struct BitcoindChainSource { + api_client: Arc, + header_cache: tokio::sync::Mutex, + latest_chain_tip: RwLock>, + onchain_wallet: Arc, + 
wallet_polling_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, +} + +impl BitcoindChainSource { + pub(crate) fn new_rpc( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + let api_client = Arc::new(BitcoindClient::new_rpc( + rpc_host.clone(), + rpc_port.clone(), + rpc_user.clone(), + rpc_password.clone(), + )); + + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + Self { + api_client, + header_cache, + latest_chain_tip, + onchain_wallet, + wallet_polling_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, + } + } + + pub(crate) fn new_rest( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + onchain_wallet: Arc, fee_estimator: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + rest_client_config: BitcoindRestClientConfig, logger: Arc, + node_metrics: Arc>, + ) -> Self { + let api_client = Arc::new(BitcoindClient::new_rest( + rest_client_config.rest_host, + rest_client_config.rest_port, + rpc_host, + rpc_port, + rpc_user, + rpc_password, + )); + + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + + Self { + api_client, + header_cache, + latest_chain_tip, + wallet_polling_status, + onchain_wallet, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, + } + } + + pub(super) fn as_utxo_source(&self) -> Arc { + self.api_client.utxo_source() + } + + pub(super) async fn continuously_sync_wallets( + &self, mut 
stop_sync_receiver: tokio::sync::watch::Receiver<()>, + channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) { + // First register for the wallet polling status to make sure `Node::sync_wallets` calls + // wait on the result before proceeding. + { + let mut status_lock = self.wallet_polling_status.lock().unwrap(); + if status_lock.register_or_subscribe_pending_sync().is_some() { + debug_assert!(false, "Sync already in progress. This should never happen."); + } + } + + log_info!( + self.logger, + "Starting initial synchronization of chain listeners. This might take a while..", + ); + + let mut backoff = CHAIN_POLLING_INTERVAL_SECS; + const MAX_BACKOFF_SECS: u64 = 300; + + loop { + let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; + let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; + let onchain_wallet_best_block_hash = + self.onchain_wallet.current_best_block().block_hash; + + let mut chain_listeners = vec![ + ( + onchain_wallet_best_block_hash, + &*self.onchain_wallet as &(dyn Listen + Send + Sync), + ), + (channel_manager_best_block_hash, &*channel_manager as &(dyn Listen + Send + Sync)), + (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), + ]; + + // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s + // before giving them to `ChainMonitor` it the first place. However, this isn't + // trivial as we load them on initialization (in the `Builder`) and only gain + // network access during `start`. For now, we just make sure we get the worst known + // block hash and sychronize them via `ChainMonitor`. 
+ if let Some(worst_channel_monitor_block_hash) = chain_monitor + .list_monitors() + .iter() + .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) + .map(|m| m.current_best_block()) + .min_by_key(|b| b.height) + .map(|b| b.block_hash) + { + chain_listeners.push(( + worst_channel_monitor_block_hash, + &*chain_monitor as &(dyn Listen + Send + Sync), + )); + } + + let mut locked_header_cache = self.header_cache.lock().await; + let now = SystemTime::now(); + match synchronize_listeners( + self.api_client.as_ref(), + self.config.network, + &mut *locked_header_cache, + chain_listeners.clone(), + ) + .await + { + Ok(chain_tip) => { + { + log_info!( + self.logger, + "Finished synchronizing listeners in {}ms", + now.elapsed().unwrap().as_millis() + ); + *self.latest_chain_tip.write().unwrap() = Some(chain_tip); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + ) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); + } + break; + }, + + Err(e) => { + log_error!(self.logger, "Failed to synchronize chain listeners: {:?}", e); + if e.kind() == BlockSourceErrorKind::Transient { + log_info!( + self.logger, + "Transient error syncing chain listeners: {:?}. Retrying in {} seconds.", + e, + backoff + ); + tokio::time::sleep(Duration::from_secs(backoff)).await; + backoff = std::cmp::min(backoff * 2, MAX_BACKOFF_SECS); + } else { + log_error!( + self.logger, + "Persistent error syncing chain listeners: {:?}. 
Retrying in {} seconds.", + e, + MAX_BACKOFF_SECS + ); + tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)).await; + } + }, + } + } + + // Now propagate the initial result to unblock waiting subscribers. + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); + + let mut chain_polling_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + chain_polling_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let mut fee_rate_update_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + // When starting up, we just blocked on updating, so skip the first tick. + fee_rate_update_interval.reset(); + fee_rate_update_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + log_info!(self.logger, "Starting continuous polling for chain updates."); + + // Start the polling loop. + loop { + tokio::select! { + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping polling for new chain data.", + ); + return; + } + _ = chain_polling_interval.tick() => { + let _ = self.poll_and_update_listeners( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&output_sweeper) + ).await; + } + _ = fee_rate_update_interval.tick() => { + let _ = self.update_fee_rate_estimates().await; + } + } + } + } + + pub(super) async fn poll_and_update_listeners( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.wallet_polling_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet polling result: {:?}", e); + 
Error::WalletOperationFailed + })?; + } + + let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); + let chain_tip = if let Some(tip) = latest_chain_tip_opt { + tip + } else { + match validate_best_block_header(self.api_client.as_ref()).await { + Ok(tip) => { + *self.latest_chain_tip.write().unwrap() = Some(tip); + tip + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + }; + + let mut locked_header_cache = self.header_cache.lock().await; + let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); + let chain_listener = ChainListener { + onchain_wallet: Arc::clone(&self.onchain_wallet), + channel_manager: Arc::clone(&channel_manager), + chain_monitor, + output_sweeper, + }; + let mut spv_client = + SpvClient::new(chain_tip, chain_poller, &mut *locked_header_cache, &chain_listener); + + let now = SystemTime::now(); + match spv_client.poll_best_tip().await { + Ok((ChainTip::Better(tip), true)) => { + log_trace!( + self.logger, + "Finished polling best tip in {}ms", + now.elapsed().unwrap().as_millis() + ); + *self.latest_chain_tip.write().unwrap() = Some(tip); + }, + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let cur_height = channel_manager.current_best_block().height; + + let now = SystemTime::now(); + let unconfirmed_txids = self.onchain_wallet.get_unconfirmed_txids(); + match self.api_client.get_updated_mempool_transactions(cur_height, unconfirmed_txids).await + { + Ok((unconfirmed_txs, evicted_txids)) => { + log_trace!( + self.logger, + "Finished polling mempool of size {} and {} evicted transactions in {}ms", + unconfirmed_txs.len(), + 
evicted_txids.len(), + now.elapsed().unwrap().as_millis() + ); + self.onchain_wallet + .apply_mempool_txs(unconfirmed_txs, evicted_txids) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to apply mempool transactions: {:?}", e); + }); + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for mempool transactions: {:?}", e); + let res = Err(Error::TxSyncFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + + let write_res = write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + ); + match write_res { + Ok(()) => (), + Err(e) => { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + let res = Err(Error::PersistenceFailed); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let res = Ok(()); + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + res + } + + pub(super) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + macro_rules! 
get_fee_rate_update { + ($estimation_fut: expr) => {{ + let update_res = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + $estimation_fut, + ) + .await + .map_err(|e| { + log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })?; + update_res + }}; + } + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + let now = Instant::now(); + for target in confirmation_targets { + let fee_rate_update_res = match target { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, + ) => { + let estimation_fut = self.api_client.get_mempool_minimum_fee_rate(); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning(LdkConfirmationTarget::MaximumFeeEstimate) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning(LdkConfirmationTarget::UrgentOnChainSweep) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + _ => { + // Otherwise, we default to economical block-target estimate. + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Economical; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + }; + + let fee_rate = match (fee_rate_update_res, self.config.network) { + (Ok(rate), _) => rate, + (Err(e), Network::Bitcoin) => { + // Strictly fail on mainnet. 
+ log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); + return Err(Error::FeerateEstimationUpdateFailed); + }, + (Err(e), n) if n == Network::Regtest || n == Network::Signet => { + // On regtest/signet we just fall back to the usual 1 sat/vb == 250 + // sat/kwu default. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", + e, + ); + FeeRate::from_sat_per_kwu(250) + }, + (Err(e), _) => { + // On testnet `estimatesmartfee` can be unreliable so we just skip in + // case of a failure, which will have us falling back to defaults. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: {}. Falling back to defaults.", + e, + ); + return Ok(()); + }, + }; + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + self.logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + if self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { + // We only log if the values changed, as it might be very spammy otherwise. 
+ log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + Ok(()) + } + + pub(crate) async fn process_broadcast_queue(&self) { + // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 + // features, we should eventually switch to use `submitpackage` via the + // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual + // transactions. + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in &next_package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.api_client.broadcast_transaction(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(id) => { + debug_assert_eq!(id, txid); + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + } + } + } + } +} pub enum BitcoindClient { Rpc { diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 338fd0d30..d756301f3 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -9,39 +9,28 @@ mod bitcoind; mod electrum; mod esplora; -use 
crate::chain::bitcoind::{ - BitcoindClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, -}; +use crate::chain::bitcoind::BitcoindChainSource; use crate::chain::electrum::ElectrumChainSource; use crate::chain::esplora::EsploraChainSource; use crate::config::{ BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, - TX_BROADCAST_TIMEOUT_SECS, WALLET_SYNC_INTERVAL_MINIMUM_SECS, -}; -use crate::fee_estimator::{ - apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, - ConfirmationTarget, OnchainFeeEstimator, + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; +use crate::fee_estimator::OnchainFeeEstimator; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_info, log_trace, LdkLogger, Logger}; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; -use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::{Filter, Listen}; -use lightning::util::ser::Writeable; +use lightning::chain::Filter; use lightning_block_sync::gossip::UtxoSource; -use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; -use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; -use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; -use bitcoin::{FeeRate, Network, Script, Txid}; +use bitcoin::{Script, Txid}; use std::collections::HashMap; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use std::sync::{Arc, RwLock}; +use std::time::Duration; pub(crate) enum WalletSyncStatus { Completed, @@ -96,250 +85,6 @@ impl WalletSyncStatus { } } -const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; 
- -pub(super) struct BitcoindChainSource { - api_client: Arc, - header_cache: tokio::sync::Mutex, - latest_chain_tip: RwLock>, - onchain_wallet: Arc, - wallet_polling_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, -} - -impl BitcoindChainSource { - pub(crate) fn new_rpc( - rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, - ) -> Self { - let api_client = Arc::new(BitcoindClient::new_rpc( - rpc_host.clone(), - rpc_port.clone(), - rpc_user.clone(), - rpc_password.clone(), - )); - - let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); - let latest_chain_tip = RwLock::new(None); - let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - Self { - api_client, - header_cache, - latest_chain_tip, - onchain_wallet, - wallet_polling_status, - fee_estimator, - tx_broadcaster, - kv_store, - config, - logger: Arc::clone(&logger), - node_metrics, - } - } - - pub(crate) fn new_rest( - rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - rest_client_config: BitcoindRestClientConfig, logger: Arc, - node_metrics: Arc>, - ) -> Self { - let api_client = Arc::new(BitcoindClient::new_rest( - rest_client_config.rest_host, - rest_client_config.rest_port, - rpc_host, - rpc_port, - rpc_user, - rpc_password, - )); - - let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); - let latest_chain_tip = RwLock::new(None); - let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - - Self { - api_client, - header_cache, - latest_chain_tip, - wallet_polling_status, - onchain_wallet, - fee_estimator, - tx_broadcaster, - kv_store, - config, - logger: Arc::clone(&logger), - node_metrics, - } - } - - 
pub(super) fn as_utxo_source(&self) -> Arc { - self.api_client.utxo_source() - } - - pub(super) async fn continuously_sync_wallets( - &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, - channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, - ) { - // First register for the wallet polling status to make sure `Node::sync_wallets` calls - // wait on the result before proceeding. - { - let mut status_lock = self.wallet_polling_status.lock().unwrap(); - if status_lock.register_or_subscribe_pending_sync().is_some() { - debug_assert!(false, "Sync already in progress. This should never happen."); - } - } - - log_info!( - self.logger, - "Starting initial synchronization of chain listeners. This might take a while..", - ); - - let mut backoff = CHAIN_POLLING_INTERVAL_SECS; - const MAX_BACKOFF_SECS: u64 = 300; - - loop { - let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; - let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; - let onchain_wallet_best_block_hash = - self.onchain_wallet.current_best_block().block_hash; - - let mut chain_listeners = vec![ - ( - onchain_wallet_best_block_hash, - &*self.onchain_wallet as &(dyn Listen + Send + Sync), - ), - (channel_manager_best_block_hash, &*channel_manager as &(dyn Listen + Send + Sync)), - (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), - ]; - - // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s - // before giving them to `ChainMonitor` it the first place. However, this isn't - // trivial as we load them on initialization (in the `Builder`) and only gain - // network access during `start`. For now, we just make sure we get the worst known - // block hash and sychronize them via `ChainMonitor`. 
- if let Some(worst_channel_monitor_block_hash) = chain_monitor - .list_monitors() - .iter() - .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) - .map(|m| m.current_best_block()) - .min_by_key(|b| b.height) - .map(|b| b.block_hash) - { - chain_listeners.push(( - worst_channel_monitor_block_hash, - &*chain_monitor as &(dyn Listen + Send + Sync), - )); - } - - let mut locked_header_cache = self.header_cache.lock().await; - let now = SystemTime::now(); - match synchronize_listeners( - self.api_client.as_ref(), - self.config.network, - &mut *locked_header_cache, - chain_listeners.clone(), - ) - .await - { - Ok(chain_tip) => { - { - log_info!( - self.logger, - "Finished synchronizing listeners in {}ms", - now.elapsed().unwrap().as_millis() - ); - *self.latest_chain_tip.write().unwrap() = Some(chain_tip); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - ) - .unwrap_or_else(|e| { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - }); - } - break; - }, - - Err(e) => { - log_error!(self.logger, "Failed to synchronize chain listeners: {:?}", e); - if e.kind() == BlockSourceErrorKind::Transient { - log_info!( - self.logger, - "Transient error syncing chain listeners: {:?}. Retrying in {} seconds.", - e, - backoff - ); - tokio::time::sleep(Duration::from_secs(backoff)).await; - backoff = std::cmp::min(backoff * 2, MAX_BACKOFF_SECS); - } else { - log_error!( - self.logger, - "Persistent error syncing chain listeners: {:?}. 
Retrying in {} seconds.", - e, - MAX_BACKOFF_SECS - ); - tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)).await; - } - }, - } - } - - // Now propagate the initial result to unblock waiting subscribers. - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); - - let mut chain_polling_interval = - tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); - chain_polling_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - let mut fee_rate_update_interval = - tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); - // When starting up, we just blocked on updating, so skip the first tick. - fee_rate_update_interval.reset(); - fee_rate_update_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - log_info!(self.logger, "Starting continuous polling for chain updates."); - - // Start the polling loop. - loop { - tokio::select! { - _ = stop_sync_receiver.changed() => { - log_trace!( - self.logger, - "Stopping polling for new chain data.", - ); - return; - } - _ = chain_polling_interval.tick() => { - let _ = self.poll_and_update_listeners( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&output_sweeper) - ).await; - } - _ = fee_rate_update_interval.tick() => { - let _ = self.update_fee_rate_estimates().await; - } - } - } - } -} - pub(crate) struct ChainSource { kind: ChainSourceKind, logger: Arc, @@ -651,128 +396,6 @@ impl ChainSource { } } -impl BitcoindChainSource { - pub(super) async fn poll_and_update_listeners( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, - ) -> Result<(), Error> { - let receiver_res = { - let mut status_lock = self.wallet_polling_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - - if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - 
debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); - log_error!(self.logger, "Failed to receive wallet polling result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); - let chain_tip = if let Some(tip) = latest_chain_tip_opt { - tip - } else { - match validate_best_block_header(self.api_client.as_ref()).await { - Ok(tip) => { - *self.latest_chain_tip.write().unwrap() = Some(tip); - tip - }, - Err(e) => { - log_error!(self.logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } - }; - - let mut locked_header_cache = self.header_cache.lock().await; - let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); - let chain_listener = ChainListener { - onchain_wallet: Arc::clone(&self.onchain_wallet), - channel_manager: Arc::clone(&channel_manager), - chain_monitor, - output_sweeper, - }; - let mut spv_client = - SpvClient::new(chain_tip, chain_poller, &mut *locked_header_cache, &chain_listener); - - let now = SystemTime::now(); - match spv_client.poll_best_tip().await { - Ok((ChainTip::Better(tip), true)) => { - log_trace!( - self.logger, - "Finished polling best tip in {}ms", - now.elapsed().unwrap().as_millis() - ); - *self.latest_chain_tip.write().unwrap() = Some(tip); - }, - Ok(_) => {}, - Err(e) => { - log_error!(self.logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } - - let cur_height = channel_manager.current_best_block().height; - - let now = SystemTime::now(); - let unconfirmed_txids = self.onchain_wallet.get_unconfirmed_txids(); - match self.api_client.get_updated_mempool_transactions(cur_height, unconfirmed_txids).await - { - Ok((unconfirmed_txs, 
evicted_txids)) => { - log_trace!( - self.logger, - "Finished polling mempool of size {} and {} evicted transactions in {}ms", - unconfirmed_txs.len(), - evicted_txids.len(), - now.elapsed().unwrap().as_millis() - ); - self.onchain_wallet - .apply_mempool_txs(unconfirmed_txs, evicted_txids) - .unwrap_or_else(|e| { - log_error!(self.logger, "Failed to apply mempool transactions: {:?}", e); - }); - }, - Err(e) => { - log_error!(self.logger, "Failed to poll for mempool transactions: {:?}", e); - let res = Err(Error::TxSyncFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - - let write_res = write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - ); - match write_res { - Ok(()) => (), - Err(e) => { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - let res = Err(Error::PersistenceFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } - - let res = Ok(()); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - res - } -} - impl ChainSource { pub(crate) async fn poll_and_update_listeners( &self, channel_manager: Arc, chain_monitor: Arc, @@ -798,126 +421,6 @@ impl ChainSource { } } -impl BitcoindChainSource { - pub(super) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { - macro_rules! 
get_fee_rate_update { - ($estimation_fut: expr) => {{ - let update_res = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - $estimation_fut, - ) - .await - .map_err(|e| { - log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })?; - update_res - }}; - } - let confirmation_targets = get_all_conf_targets(); - - let mut new_fee_rate_cache = HashMap::with_capacity(10); - let now = Instant::now(); - for target in confirmation_targets { - let fee_rate_update_res = match target { - ConfirmationTarget::Lightning( - LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, - ) => { - let estimation_fut = self.api_client.get_mempool_minimum_fee_rate(); - get_fee_rate_update!(estimation_fut) - }, - ConfirmationTarget::Lightning(LdkConfirmationTarget::MaximumFeeEstimate) => { - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = - self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - ConfirmationTarget::Lightning(LdkConfirmationTarget::UrgentOnChainSweep) => { - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = - self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - _ => { - // Otherwise, we default to economical block-target estimate. - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Economical; - let estimation_fut = - self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - }; - - let fee_rate = match (fee_rate_update_res, self.config.network) { - (Ok(rate), _) => rate, - (Err(e), Network::Bitcoin) => { - // Strictly fail on mainnet. 
- log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); - return Err(Error::FeerateEstimationUpdateFailed); - }, - (Err(e), n) if n == Network::Regtest || n == Network::Signet => { - // On regtest/signet we just fall back to the usual 1 sat/vb == 250 - // sat/kwu default. - log_error!( - self.logger, - "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", - e, - ); - FeeRate::from_sat_per_kwu(250) - }, - (Err(e), _) => { - // On testnet `estimatesmartfee` can be unreliable so we just skip in - // case of a failure, which will have us falling back to defaults. - log_error!( - self.logger, - "Failed to retrieve fee rate estimates: {}. Falling back to defaults.", - e, - ); - return Ok(()); - }, - }; - - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. - let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - - new_fee_rate_cache.insert(target, adjusted_fee_rate); - - log_trace!( - self.logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.to_sat_per_kwu(), - ); - } - - if self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { - // We only log if the values changed, as it might be very spammy otherwise. 
- log_info!( - self.logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - } - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; - } - - Ok(()) - } -} - impl ChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { match &self.kind { @@ -934,59 +437,6 @@ impl ChainSource { } } -impl BitcoindChainSource { - pub(crate) async fn process_broadcast_queue(&self) { - // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 - // features, we should eventually switch to use `submitpackage` via the - // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual - // transactions. - let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - self.api_client.broadcast_transaction(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(id) => { - debug_assert_eq!(id, txid); - log_trace!(self.logger, "Successfully broadcast transaction {}", txid); - }, - Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, - Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - } - } - } - } -} - impl ChainSource { pub(crate) async 
fn process_broadcast_queue(&self) { match &self.kind { From 7cb1d34e09d7ab8ca050528ace1363713ed0d914 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 29 Jul 2025 10:06:18 +0200 Subject: [PATCH 035/184] Drop intermittent `impl` blocks on `ChainSource` .. now that we don't need them anymore for review, we drop the extra `impl` blocks again. --- src/chain/mod.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index d756301f3..91cce1fe3 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -347,9 +347,7 @@ impl ChainSource { } } } -} -impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { @@ -367,9 +365,7 @@ impl ChainSource { }, } } -} -impl ChainSource { // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) pub(crate) async fn sync_lightning_wallet( @@ -394,9 +390,7 @@ impl ChainSource { }, } } -} -impl ChainSource { pub(crate) async fn poll_and_update_listeners( &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, @@ -419,9 +413,7 @@ impl ChainSource { }, } } -} -impl ChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { @@ -435,9 +427,7 @@ impl ChainSource { }, } } -} -impl ChainSource { pub(crate) async fn process_broadcast_queue(&self) { match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { From fb34a2703278e0cc316f940b2e5916c8dbd09538 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 31 Jul 2025 14:26:27 +0200 Subject: [PATCH 036/184] Move inner code to `sync_lightning_wallet_inner` for Esplora Previously, we might have overlooked some cases that would exit the method and bubble up an error via `?` instead of propagating to all subscribers. 
Here, we split out the code to an inner method to ensure we always propagate. --- src/chain/esplora.rs | 115 +++++++++++++++++++++++-------------------- 1 file changed, 61 insertions(+), 54 deletions(-) diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 3a911394c..a93276018 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -212,15 +212,6 @@ impl EsploraChainSource { &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - let sync_cman = Arc::clone(&channel_manager); - let sync_cmon = Arc::clone(&chain_monitor); - let sync_sweeper = Arc::clone(&output_sweeper); - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - let receiver_res = { let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); status_lock.register_or_subscribe_pending_sync() @@ -233,58 +224,74 @@ impl EsploraChainSource { Error::WalletOperationFailed })?; } - let res = { - let timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), - self.tx_sync.sync(confirmables), - ); - let now = Instant::now(); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_info!( - self.logger, - "Sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; - } - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), + let res = + self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; + + 
self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn sync_lightning_wallet_inner( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + &*sync_cman as &(dyn Confirm + Sync + Send), + &*sync_cmon as &(dyn Confirm + Sync + Send), + &*sync_sweeper as &(dyn Confirm + Sync + Send), + ]; + + let timeout_fut = tokio::time::timeout( + Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + self.tx_sync.sync(confirmables), + ); + let now = Instant::now(); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_info!( + self.logger, + "Sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() + ); + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, Arc::clone(&self.kv_store), Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), )?; - Ok(()) - }, - Err(e) => { - log_error!(self.logger, "Sync of Lightning wallet failed: {}", e); - Err(e.into()) - }, + } + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; + Ok(()) }, Err(e) => { - log_error!(self.logger, "Lightning wallet sync timed out: {}", e); - Err(Error::TxSyncTimeout) + log_error!(self.logger, "Sync of Lightning wallet failed: {}", e); + Err(e.into()) }, - } - }; - - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res + }, + Err(e) => { + log_error!(self.logger, 
"Lightning wallet sync timed out: {}", e); + Err(Error::TxSyncTimeout) + }, + } } pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { From c1157a340ebac5bd53f7f262decfbf6529d976a3 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 31 Jul 2025 14:30:54 +0200 Subject: [PATCH 037/184] Move inner code to `sync_onchain_wallet_inner` for Esplora Previously, we might have overlooked some cases that would exit the method and bubble up an error via `?` instead of propagating to all subscribers. Here, we split out the code to an inner method to ensure we always propagate. --- src/chain/esplora.rs | 182 ++++++++++++++++++++++--------------------- 1 file changed, 92 insertions(+), 90 deletions(-) diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index a93276018..5932426b7 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -112,102 +112,104 @@ impl EsploraChainSource { })?; } - let res = { - // If this is our first sync, do a full scan with the configured gap limit. - // Otherwise just do an incremental sync. - let incremental_sync = - self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); - - macro_rules! 
get_and_apply_wallet_update { - ($sync_future: expr) => {{ - let now = Instant::now(); - match $sync_future.await { - Ok(res) => match res { - Ok(update) => match self.onchain_wallet.apply_update(update) { - Ok(()) => { - log_info!( - self.logger, - "{} of on-chain wallet finished in {}ms.", - if incremental_sync { "Incremental sync" } else { "Sync" }, - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger) - )?; - } - Ok(()) - }, - Err(e) => Err(e), - }, - Err(e) => match *e { - esplora_client::Error::Reqwest(he) => { - log_error!( - self.logger, - "{} of on-chain wallet failed due to HTTP connection error: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - he - ); - Err(Error::WalletOperationFailed) - }, - _ => { - log_error!( - self.logger, - "{} of on-chain wallet failed due to Esplora error: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - e - ); - Err(Error::WalletOperationFailed) - }, - }, - }, - Err(e) => { - log_error!( - self.logger, - "{} of on-chain wallet timed out: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - e - ); - Err(Error::WalletOperationTimeout) - }, - } - }} - } - - if incremental_sync { - let sync_request = self.onchain_wallet.get_incremental_sync_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } else { - let full_scan_request = self.onchain_wallet.get_full_scan_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - 
Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - self.esplora_client.full_scan( - full_scan_request, - BDK_CLIENT_STOP_GAP, - BDK_CLIENT_CONCURRENCY, - ), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } - }; + let res = self.sync_onchain_wallet_inner().await; self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); res } + async fn sync_onchain_wallet_inner(&self) -> Result<(), Error> { + // If this is our first sync, do a full scan with the configured gap limit. + // Otherwise just do an incremental sync. + let incremental_sync = + self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + + macro_rules! get_and_apply_wallet_update { + ($sync_future: expr) => {{ + let now = Instant::now(); + match $sync_future.await { + Ok(res) => match res { + Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + self.logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger) + )?; + } + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + log_error!( + self.logger, + "{} of on-chain wallet failed due to HTTP connection error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + he + ); + Err(Error::WalletOperationFailed) + }, + _ => { + log_error!( + self.logger, + "{} of on-chain wallet failed due to Esplora error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationFailed) + }, + }, + }, + Err(e) => { + 
log_error!( + self.logger, + "{} of on-chain wallet timed out: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationTimeout) + }, + } + }} + } + + if incremental_sync { + let sync_request = self.onchain_wallet.get_incremental_sync_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } else { + let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } + } + pub(super) async fn sync_lightning_wallet( &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, From c7655456185d35d3c840c9ff61fd61532427cc16 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 31 Jul 2025 14:41:06 +0200 Subject: [PATCH 038/184] Move inner code to `sync_lightning_wallet_inner` for Electrum Previously, we might have overlooked some cases that would exit the method and bubble up an error via `?` instead of propagating to all subscribers. Here, we split out the code to an inner method to ensure we always propagate. 
--- src/chain/electrum.rs | 54 +++++++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 44a637cc3..a287ad41a 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -188,26 +188,6 @@ impl ElectrumChainSource { &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the lightning wallet" - ); - return Err(Error::TxSyncFailed); - }; - - let sync_cman = Arc::clone(&channel_manager); - let sync_cmon = Arc::clone(&chain_monitor); - let sync_sweeper = Arc::clone(&output_sweeper); - let confirmables = vec![ - sync_cman as Arc, - sync_cmon as Arc, - sync_sweeper as Arc, - ]; - let receiver_res = { let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); status_lock.register_or_subscribe_pending_sync() @@ -221,6 +201,38 @@ impl ElectrumChainSource { })?; } + let res = + self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; + + self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn sync_lightning_wallet_inner( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + sync_cman as Arc, + sync_cmon as Arc, + sync_sweeper as Arc, + ]; + + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the lightning 
wallet" + ); + return Err(Error::TxSyncFailed); + }; + let res = electrum_client.sync_confirmables(confirmables).await; if let Ok(_) = res { @@ -245,8 +257,6 @@ impl ElectrumChainSource { )?; } - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - res } From 128fdfd09388632046ab61d07565204b711b5a46 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 31 Jul 2025 14:43:26 +0200 Subject: [PATCH 039/184] Move inner code to `sync_onchain_wallet_inner` for Electrum Previously, we might have overlooked some cases that would exit the method and bubble up an error via `?` instead of propagating to all subscribers. Here, we split out the code to an inner method to ensure we always propagate. --- src/chain/electrum.rs | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index a287ad41a..6193c67b3 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -103,16 +103,6 @@ impl ElectrumChainSource { } pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the onchain wallet" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; let receiver_res = { let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); status_lock.register_or_subscribe_pending_sync() @@ -126,6 +116,24 @@ impl ElectrumChainSource { })?; } + let res = self.sync_onchain_wallet_inner().await; + + self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn sync_onchain_wallet_inner(&self) -> Result<(), Error> { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + 
debug_assert!( + false, + "We should have started the chain source before syncing the onchain wallet" + ); + return Err(Error::FeerateEstimationUpdateFailed); + }; // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = @@ -179,8 +187,6 @@ impl ElectrumChainSource { apply_wallet_update(update_res, now) }; - self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - res } From 9444bc4d31d42ac1e315985dc0c611aac230091f Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 31 Jul 2025 14:46:56 +0200 Subject: [PATCH 040/184] Move inner code to `poll_and_update_listeners_inner` Previously, we might have overlooked some cases that would exit the method and bubble up an error via `?` instead of propagating to all subscribers. Here, we split out the code to an inner method to ensure we always propagate. --- src/chain/bitcoind.rs | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index b87ee13ed..5fd4bdd43 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -306,6 +306,19 @@ impl BitcoindChainSource { })?; } + let res = self + .poll_and_update_listeners_inner(channel_manager, chain_monitor, output_sweeper) + .await; + + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn poll_and_update_listeners_inner( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); let chain_tip = if let Some(tip) = latest_chain_tip_opt { tip @@ -317,9 +330,7 @@ impl BitcoindChainSource { }, Err(e) => { log_error!(self.logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return 
res; + return Err(Error::TxSyncFailed); }, } }; @@ -348,9 +359,7 @@ impl BitcoindChainSource { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; + return Err(Error::TxSyncFailed); }, } @@ -376,9 +385,7 @@ impl BitcoindChainSource { }, Err(e) => { log_error!(self.logger, "Failed to poll for mempool transactions: {:?}", e); - let res = Err(Error::TxSyncFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; + return Err(Error::TxSyncFailed); }, } @@ -388,24 +395,13 @@ impl BitcoindChainSource { locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - let write_res = write_node_metrics( + write_node_metrics( &*locked_node_metrics, Arc::clone(&self.kv_store), Arc::clone(&self.logger), - ); - match write_res { - Ok(()) => (), - Err(e) => { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - let res = Err(Error::PersistenceFailed); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } + )?; - let res = Ok(()); - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - res + Ok(()) } pub(super) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { From a6349a47e037f96cbc2589324e02954fcd0fba49 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 31 Jul 2025 15:01:58 +0200 Subject: [PATCH 041/184] Call `periodically_archive_fully_resolved_monitors` for Bitcoind Which we previously overlooked --- src/chain/bitcoind.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 5fd4bdd43..fc5f7048f 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -5,7 +5,7 
@@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use super::WalletSyncStatus; +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; use crate::config::{ BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, @@ -340,7 +340,7 @@ impl BitcoindChainSource { let chain_listener = ChainListener { onchain_wallet: Arc::clone(&self.onchain_wallet), channel_manager: Arc::clone(&channel_manager), - chain_monitor, + chain_monitor: Arc::clone(&chain_monitor), output_sweeper, }; let mut spv_client = @@ -355,6 +355,14 @@ impl BitcoindChainSource { now.elapsed().unwrap().as_millis() ); *self.latest_chain_tip.write().unwrap() = Some(tip); + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + chain_monitor, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; }, Ok(_) => {}, Err(e) => { From be2bc0782cfcef658dc751a96c85af717e3d491c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 7 Aug 2025 14:05:00 +0200 Subject: [PATCH 042/184] Bump `electrum-client` to v0.24.0 We bump the `electrum-client` dependency to the recently-introduced version v0.24.0. --- Cargo.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dcb7d022f..da04c4f4a 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ esplora-client = { version = "0.12", default-features = false, features = ["toki # `lightning-transaction-sync` APIs. We should drop it as part of the upgrade # to LDK 0.2. 
esplora-client_0_11 = { package = "esplora-client", version = "0.11", default-features = false, features = ["tokio", "async-https-rustls"] } -electrum-client = { version = "0.23.1", default-features = true } +electrum-client = { version = "0.24.0", default-features = true } libc = "0.2" uniffi = { version = "0.28.3", features = ["build"], optional = true } serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] } @@ -103,11 +103,11 @@ proptest = "1.0.0" regex = "1.5.6" [target.'cfg(not(no_download))'.dev-dependencies] -electrsd = { version = "0.34.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } +electrsd = { version = "0.35.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } [target.'cfg(no_download)'.dev-dependencies] -electrsd = { version = "0.34.0", default-features = false, features = ["legacy"] } -corepc-node = { version = "0.7.0", default-features = false, features = ["27_2"] } +electrsd = { version = "0.35.0", default-features = false, features = ["legacy"] } +corepc-node = { version = "0.8.0", default-features = false, features = ["27_2"] } [target.'cfg(cln_test)'.dev-dependencies] clightningrpc = { version = "0.3.0-beta.8", default-features = false } From 3d356910790923b5f54dfc6d4f52cea87619a08a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 7 Aug 2025 14:18:02 +0200 Subject: [PATCH 043/184] Fix lifetime elision warnings introduced by rustc 1.89 The just-released rustc 1.89 added a new `mismatched-lifetime-syntaxes` lint which had two warnings pop up. We fix these here. 
--- src/chain/bitcoind.rs | 2 +- src/tx_broadcaster.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index fc5f7048f..d7d325460 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -1155,7 +1155,7 @@ impl BlockSource for BitcoindClient { } } - fn get_best_block(&self) -> AsyncBlockSourceResult<(bitcoin::BlockHash, Option)> { + fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (bitcoin::BlockHash, Option)> { match self { BitcoindClient::Rpc { rpc_client, .. } => { Box::pin(async move { rpc_client.get_best_block().await }) diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs index 09189b137..4d9397a61 100644 --- a/src/tx_broadcaster.rs +++ b/src/tx_broadcaster.rs @@ -36,7 +36,9 @@ where Self { queue_sender, queue_receiver: Mutex::new(queue_receiver), logger } } - pub(crate) async fn get_broadcast_queue(&self) -> MutexGuard>> { + pub(crate) async fn get_broadcast_queue( + &self, + ) -> MutexGuard<'_, mpsc::Receiver>> { self.queue_receiver.lock().await } } From 1af27ac46363657a05d03d27087bcbd725845f46 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 11 Aug 2025 10:33:55 +0200 Subject: [PATCH 044/184] Bump MSRV to rustc 1.85 We generally align our MSRV with Debian's stable channel. Debian 13 'Trixie' was just released, shipping rustc 1.85. We therefore bump our MSRV on the `main` branch here. 
--- .github/workflows/rust.yml | 11 +++-------- README.md | 2 +- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 1c4e6ed15..aff610908 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -18,7 +18,7 @@ jobs: toolchain: [ stable, beta, - 1.75.0, # Our MSRV + 1.85.0, # Our MSRV ] include: - toolchain: stable @@ -29,7 +29,7 @@ jobs: platform: macos-latest - toolchain: stable platform: windows-latest - - toolchain: 1.75.0 + - toolchain: 1.85.0 msrv: true runs-on: ${{ matrix.platform }} steps: @@ -42,11 +42,6 @@ jobs: - name: Check formatting on Rust ${{ matrix.toolchain }} if: matrix.check-fmt run: rustup component add rustfmt && cargo fmt --all -- --check - - name: Pin packages to allow for MSRV - if: matrix.msrv - run: | - cargo update -p home --precise "0.5.9" --verbose # home v0.5.11 requires rustc 1.81 or newer - cargo update -p idna_adapter --precise "1.1.0" --verbose # idna_adapter 1.2 switched to ICU4X, requiring 1.81 and newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" @@ -79,7 +74,7 @@ jobs: if: matrix.build-uniffi run: cargo build --features uniffi --verbose --color always - name: Build documentation on Rust ${{ matrix.toolchain }} - if: "matrix.platform != 'windows-latest' || matrix.toolchain != '1.75.0'" + if: "matrix.platform != 'windows-latest' || matrix.toolchain != '1.85.0'" run: | cargo doc --release --verbose --color always cargo doc --document-private-items --verbose --color always diff --git a/README.md b/README.md index ed35d8640..d11c5fc8e 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ LDK Node currently comes with a decidedly opinionated set of design choices: LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. 
However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/). Moreover, [Flutter bindings][flutter_bindings] are also available. ## MSRV -The Minimum Supported Rust Version (MSRV) is currently 1.75.0. +The Minimum Supported Rust Version (MSRV) is currently 1.85.0. [api_docs]: https://docs.rs/ldk-node/*/ldk_node/ [api_docs_node]: https://docs.rs/ldk-node/*/ldk_node/struct.Node.html From 90cde611afb078314901b3b466eb80776a2450d4 Mon Sep 17 00:00:00 2001 From: aagbotemi Date: Sat, 31 May 2025 12:42:17 +0100 Subject: [PATCH 045/184] feat: add ability to set payment preimage for spontaneous payments --- bindings/ldk_node.udl | 4 ++ src/payment/spontaneous.rs | 26 ++++++++++-- tests/integration_tests_rust.rs | 72 ++++++++++++++++++++++++++++++++- 3 files changed, 96 insertions(+), 6 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 3c240b43c..26480ca4b 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -213,6 +213,10 @@ interface SpontaneousPayment { [Throws=NodeError] PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters, sequence custom_tlvs); [Throws=NodeError] + PaymentId send_with_preimage(u64 amount_msat, PublicKey node_id, PaymentPreimage preimage, SendingParameters? sending_parameters); + [Throws=NodeError] + PaymentId send_with_preimage_and_custom_tlvs(u64 amount_msat, PublicKey node_id, sequence custom_tlvs, PaymentPreimage preimage, SendingParameters? 
sending_parameters); + [Throws=NodeError] void send_probes(u64 amount_msat, PublicKey node_id); }; diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 1508b6cd8..a7e7876d7 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -57,7 +57,7 @@ impl SpontaneousPayment { pub fn send( &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, None) + self.send_inner(amount_msat, node_id, sending_parameters, None, None) } /// Send a spontaneous payment including a list of custom TLVs. @@ -65,19 +65,37 @@ impl SpontaneousPayment { &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, custom_tlvs: Vec, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, Some(custom_tlvs)) + self.send_inner(amount_msat, node_id, sending_parameters, Some(custom_tlvs), None) + } + + /// Send a spontaneous payment with custom preimage + pub fn send_with_preimage( + &self, amount_msat: u64, node_id: PublicKey, preimage: PaymentPreimage, + sending_parameters: Option, + ) -> Result { + self.send_inner(amount_msat, node_id, sending_parameters, None, Some(preimage)) + } + + /// Send a spontaneous payment with custom preimage including a list of custom TLVs. 
+ pub fn send_with_preimage_and_custom_tlvs( + &self, amount_msat: u64, node_id: PublicKey, custom_tlvs: Vec, + preimage: PaymentPreimage, sending_parameters: Option, + ) -> Result { + self.send_inner(amount_msat, node_id, sending_parameters, Some(custom_tlvs), Some(preimage)) } fn send_inner( &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, - custom_tlvs: Option>, + custom_tlvs: Option>, preimage: Option, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); } - let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes()); + let payment_preimage = preimage + .unwrap_or_else(|| PaymentPreimage(self.keys_manager.get_secure_random_bytes())); + let payment_hash = PaymentHash::from(payment_preimage); let payment_id = PaymentId(payment_hash.0); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index fbd95ef50..57742e09e 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -19,8 +19,8 @@ use common::{ use ldk_node::config::EsploraSyncConfig; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ - ConfirmationStatus, PaymentDirection, PaymentKind, PaymentStatus, QrPaymentResult, - SendingParameters, + ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, + QrPaymentResult, SendingParameters, }; use ldk_node::{Builder, Event, NodeError}; @@ -29,8 +29,10 @@ use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::util::persist::KVStore; use lightning_invoice::{Bolt11InvoiceDescription, Description}; +use lightning_types::payment::PaymentPreimage; use bitcoin::address::NetworkUnchecked; +use bitcoin::hashes::sha256::Hash as Sha256Hash; use bitcoin::hashes::Hash; use bitcoin::Address; use bitcoin::Amount; @@ -1389,3 +1391,69 @@ fn facade_logging() { validate_log_entry(entry); } } + +#[test] +fn spontaneous_send_with_custom_preimage() { + let (bitcoind, 
electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premine_sat = 1_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premine_sat), + ); + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 500_000, true, &electrsd); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let seed = b"test_payment_preimage"; + let bytes: Sha256Hash = Sha256Hash::hash(seed); + let custom_bytes = bytes.to_byte_array(); + let custom_preimage = PaymentPreimage(custom_bytes); + + let amount_msat = 100_000; + let payment_id = node_a + .spontaneous_payment() + .send_with_preimage(amount_msat, node_b.node_id(), custom_preimage, None) + .unwrap(); + + // check payment status and verify stored preimage + expect_payment_successful_event!(node_a, Some(payment_id), None); + let details: PaymentDetails = + node_a.list_payments_with_filter(|p| p.id == payment_id).first().unwrap().clone(); + assert_eq!(details.status, PaymentStatus::Succeeded); + if let PaymentKind::Spontaneous { preimage: Some(pi), .. } = details.kind { + assert_eq!(pi.0, custom_bytes); + } else { + panic!("Expected a spontaneous PaymentKind with a preimage"); + } + + // Verify receiver side (node_b) + expect_payment_received_event!(node_b, amount_msat); + let receiver_payments: Vec = node_b.list_payments_with_filter(|p| { + p.direction == PaymentDirection::Inbound + && matches!(p.kind, PaymentKind::Spontaneous { .. 
}) + }); + + assert_eq!(receiver_payments.len(), 1); + let receiver_details = &receiver_payments[0]; + assert_eq!(receiver_details.status, PaymentStatus::Succeeded); + assert_eq!(receiver_details.amount_msat, Some(amount_msat)); + assert_eq!(receiver_details.direction, PaymentDirection::Inbound); + + // Verify receiver also has the same preimage + if let PaymentKind::Spontaneous { preimage: Some(pi), .. } = &receiver_details.kind { + assert_eq!(pi.0, custom_bytes); + } else { + panic!("Expected receiver to have spontaneous PaymentKind with preimage"); + } +} From 1d06c7aa1c2c966d89bde3991c86e70c0941ac43 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 1 Aug 2025 14:16:24 +0200 Subject: [PATCH 046/184] Ensure we always start up with a `rustls` `CryptoProvider` The `rustls` library recently introduced this weird behavior where they expect users to, apart from configuring the respective feature, also explicitly call `CryptoProvider::install_default`. Otherwise they'd simply panic at runtime whenever the first network call requiring TLS would be made. While we already made a change upstream at `rust-electrum-client`, we also make sure here that we definitely, always, absolutely are sure that we have a `CryptoProvider` set on startup.
--- Cargo.toml | 1 + src/builder.rs | 23 ++++++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index da04c4f4a..96a9eea53 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,7 @@ bdk_electrum = { version = "0.23.0", default-features = false, features = ["use- bdk_wallet = { version = "2.0.0", default-features = false, features = ["std", "keys-bip39"]} reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } +rustls = { version = "0.23", default-features = false } rusqlite = { version = "0.31.0", features = ["bundled"] } bitcoin = "0.32.4" bip39 = "2.0.0" diff --git a/src/builder.rs b/src/builder.rs index 85ec70d18..1e9c731e3 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -75,7 +75,7 @@ use std::fmt; use std::fs; use std::path::PathBuf; use std::sync::atomic::AtomicBool; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex, Once, RwLock}; use std::time::SystemTime; use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; @@ -1051,6 +1051,8 @@ fn build_with_store_internal( liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64], logger: Arc, kv_store: Arc, ) -> Result { + optionally_install_rustls_cryptoprovider(); + if let Err(err) = may_announce_channel(&config) { if config.announcement_addresses.is_some() { log_error!(logger, "Announcement addresses were set but some required configuration options for node announcement are missing: {}", err); @@ -1663,6 +1665,25 @@ fn build_with_store_internal( }) } +fn optionally_install_rustls_cryptoprovider() { + // Use a global `Once`, ensuring that only one thread at a time installs the provider. This + is mostly required for running tests concurrently. + static INIT_CRYPTO: Once = Once::new(); + + INIT_CRYPTO.call_once(|| { + // Ensure we always install a `CryptoProvider` for `rustls` if it was somehow not previously installed by now.
+ if rustls::crypto::CryptoProvider::get_default().is_none() { + let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + } + + // Refuse to startup without TLS support. Better to catch it now than even later at runtime. + assert!( + rustls::crypto::CryptoProvider::get_default().is_some(), + "We need to have a CryptoProvider" + ); + }); +} + /// Sets up the node logger. fn setup_logger( log_writer_config: &Option, config: &Config, From 3ff489068f6e24c362980e1ea208f4937dbe4ab9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 14 Aug 2025 14:45:08 +0200 Subject: [PATCH 047/184] Add test that drops the node in an async context .. as tokio tends to panic if dropping a runtime in an async context and we're not super careful. Here, we add some test coverage for this edge case in Rust tests. --- tests/integration_tests_rust.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 57742e09e..ad3867429 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -1457,3 +1457,14 @@ fn spontaneous_send_with_custom_preimage() { panic!("Expected receiver to have spontaneous PaymentKind with preimage"); } } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn drop_in_async_context() { + let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let seed_bytes = vec![42u8; 64]; + + let config = random_config(true); + let node = setup_node(&chain_source, config, Some(seed_bytes)); + node.stop().unwrap(); +} From b7c07043e6cc1b57ac9019f3efbd1e8f3201f818 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 14 Aug 2025 13:35:39 +0200 Subject: [PATCH 048/184] Have `ChainSource` hold `tx_broadcaster` reference Previously, individual chain sources would hold references to the `Broadcaster` to acquire the broadcast queue. 
Here, we move this to `ChainSource`, which allows us to handle the queue in a single place, while the individual chain sources will deal with the actual packages only. --- src/chain/bitcoind.rs | 76 +++++++++++------------- src/chain/electrum.rs | 18 ++---- src/chain/esplora.rs | 131 ++++++++++++++++++++---------------------- src/chain/mod.rs | 36 ++++++------ 4 files changed, 119 insertions(+), 142 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index d7d325460..a120f8253 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -16,7 +16,7 @@ use crate::fee_estimator::{ }; use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; @@ -54,7 +54,6 @@ pub(super) struct BitcoindChainSource { onchain_wallet: Arc, wallet_polling_status: Mutex, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, @@ -65,8 +64,8 @@ impl BitcoindChainSource { pub(crate) fn new_rpc( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, ) -> Self { let api_client = Arc::new(BitcoindClient::new_rpc( rpc_host.clone(), @@ -85,7 +84,6 @@ impl BitcoindChainSource { onchain_wallet, wallet_polling_status, fee_estimator, - tx_broadcaster, kv_store, config, logger: Arc::clone(&logger), @@ -96,9 +94,8 @@ impl BitcoindChainSource { pub(crate) fn new_rest( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: 
Arc, kv_store: Arc, config: Arc, - rest_client_config: BitcoindRestClientConfig, logger: Arc, - node_metrics: Arc>, + kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, + logger: Arc, node_metrics: Arc>, ) -> Self { let api_client = Arc::new(BitcoindClient::new_rest( rest_client_config.rest_host, @@ -120,7 +117,6 @@ impl BitcoindChainSource { wallet_polling_status, onchain_wallet, fee_estimator, - tx_broadcaster, kv_store, config, logger: Arc::clone(&logger), @@ -530,53 +526,45 @@ impl BitcoindChainSource { Ok(()) } - pub(crate) async fn process_broadcast_queue(&self) { + pub(crate) async fn process_broadcast_package(&self, package: Vec) { // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 // features, we should eventually switch to use `submitpackage` via the // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual // transactions. - let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - self.api_client.broadcast_transaction(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(id) => { - debug_assert_eq!(id, txid); - log_trace!(self.logger, "Successfully broadcast transaction {}", txid); - }, - Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, + for tx in &package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.api_client.broadcast_transaction(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(id) => { + debug_assert_eq!(id, txid); + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); }, 
Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); + log_error!(self.logger, "Failed to broadcast transaction {}: {}", txid, e); log_trace!( self.logger, "Failed broadcast transaction bytes: {}", log_bytes!(tx.encode()) ); }, - } + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, } } } diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 6193c67b3..abbb758dd 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -18,7 +18,7 @@ use crate::fee_estimator::{ }; use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::NodeMetrics; use lightning::chain::{Confirm, Filter, WatchedOutput}; @@ -56,7 +56,6 @@ pub(super) struct ElectrumChainSource { onchain_wallet_sync_status: Mutex, lightning_wallet_sync_status: Mutex, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, @@ -66,9 +65,8 @@ pub(super) struct ElectrumChainSource { impl ElectrumChainSource { pub(super) fn new( server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, - fee_estimator: Arc, tx_broadcaster: Arc, - kv_store: Arc, config: Arc, logger: Arc, - node_metrics: Arc>, + fee_estimator: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, ) -> Self { let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new()); let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); @@ -81,7 +79,6 @@ impl ElectrumChainSource { onchain_wallet_sync_status, lightning_wallet_sync_status, fee_estimator, - tx_broadcaster, kv_store, 
config, logger: Arc::clone(&logger), @@ -302,7 +299,7 @@ impl ElectrumChainSource { Ok(()) } - pub(crate) async fn process_broadcast_queue(&self) { + pub(crate) async fn process_broadcast_package(&self, package: Vec) { let electrum_client: Arc = if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { Arc::clone(client) @@ -311,11 +308,8 @@ impl ElectrumChainSource { return; }; - let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in next_package { - electrum_client.broadcast(tx).await; - } + for tx in package { + electrum_client.broadcast(tx).await; } } } diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 5932426b7..a8806a413 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -18,7 +18,7 @@ use crate::fee_estimator::{ }; use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; use lightning::chain::{Confirm, Filter, WatchedOutput}; @@ -30,7 +30,7 @@ use bdk_esplora::EsploraAsyncExt; use esplora_client::AsyncClient as EsploraAsyncClient; -use bitcoin::{FeeRate, Network, Script, Txid}; +use bitcoin::{FeeRate, Network, Script, Transaction, Txid}; use std::collections::HashMap; use std::sync::{Arc, Mutex, RwLock}; @@ -44,7 +44,6 @@ pub(super) struct EsploraChainSource { tx_sync: Arc>>, lightning_wallet_sync_status: Mutex, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, @@ -55,8 +54,8 @@ impl EsploraChainSource { pub(crate) fn new( server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, + 
kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, ) -> Self { // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating // `lightning-transaction-sync`. We should revert this as part of of the upgrade to LDK 0.2. @@ -90,7 +89,6 @@ impl EsploraChainSource { tx_sync, lightning_wallet_sync_status, fee_estimator, - tx_broadcaster, kv_store, config, logger, @@ -372,76 +370,73 @@ impl EsploraChainSource { Ok(()) } - pub(crate) async fn process_broadcast_queue(&self) { - let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - self.esplora_client.broadcast(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_trace!(self.logger, "Successfully broadcast transaction {}", txid); - }, - Err(e) => match e { - esplora_client::Error::HttpResponse { status, message } => { - if status == 400 { - // Log 400 at lesser level, as this often just means bitcoind already knows the - // transaction. - // FIXME: We can further differentiate here based on the error - // message which will be available with rust-esplora-client 0.7 and - // later. 
- log_trace!( - self.logger, - "Failed to broadcast due to HTTP connection error: {}", - message - ); - } else { - log_error!( - self.logger, - "Failed to broadcast due to HTTP connection error: {} - {}", - status, - message - ); - } + pub(crate) async fn process_broadcast_package(&self, package: Vec) { + for tx in &package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.esplora_client.broadcast(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); + }, + Err(e) => match e { + esplora_client::Error::HttpResponse { status, message } => { + if status == 400 { + // Log 400 at lesser level, as this often just means bitcoind already knows the + // transaction. + // FIXME: We can further differentiate here based on the error + // message which will be available with rust-esplora-client 0.7 and + // later. log_trace!( self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) + "Failed to broadcast due to HTTP connection error: {}", + message ); - }, - _ => { + } else { log_error!( self.logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) + "Failed to broadcast due to HTTP connection error: {} - {}", + status, + message ); - }, + } + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + _ => { + log_error!( + self.logger, + "Failed to broadcast transaction {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); }, }, - Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) 
- ); - }, - } + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, } } } diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 91cce1fe3..db0573d3d 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -87,6 +87,7 @@ impl WalletSyncStatus { pub(crate) struct ChainSource { kind: ChainSourceKind, + tx_broadcaster: Arc, logger: Arc, } @@ -109,14 +110,13 @@ impl ChainSource { sync_config, onchain_wallet, fee_estimator, - tx_broadcaster, kv_store, config, Arc::clone(&logger), node_metrics, ); let kind = ChainSourceKind::Esplora(esplora_chain_source); - Self { kind, logger } + Self { kind, tx_broadcaster, logger } } pub(crate) fn new_electrum( @@ -130,14 +130,13 @@ impl ChainSource { sync_config, onchain_wallet, fee_estimator, - tx_broadcaster, kv_store, config, Arc::clone(&logger), node_metrics, ); let kind = ChainSourceKind::Electrum(electrum_chain_source); - Self { kind, logger } + Self { kind, tx_broadcaster, logger } } pub(crate) fn new_bitcoind_rpc( @@ -153,14 +152,13 @@ impl ChainSource { rpc_password, onchain_wallet, fee_estimator, - tx_broadcaster, kv_store, config, Arc::clone(&logger), node_metrics, ); let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); - Self { kind, logger } + Self { kind, tx_broadcaster, logger } } pub(crate) fn new_bitcoind_rest( @@ -177,7 +175,6 @@ impl ChainSource { rpc_password, onchain_wallet, fee_estimator, - tx_broadcaster, kv_store, config, rest_client_config, @@ -185,7 +182,7 @@ impl ChainSource { node_metrics, ); let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); - Self { kind, logger } + Self { kind, tx_broadcaster, logger } } pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { @@ -429,16 +426,19 @@ impl ChainSource { } pub(crate) async fn process_broadcast_queue(&self) { - match &self.kind { - 
ChainSourceKind::Esplora(esplora_chain_source) => { - esplora_chain_source.process_broadcast_queue().await - }, - ChainSourceKind::Electrum(electrum_chain_source) => { - electrum_chain_source.process_broadcast_queue().await - }, - ChainSourceKind::Bitcoind(bitcoind_chain_source) => { - bitcoind_chain_source.process_broadcast_queue().await - }, + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.process_broadcast_package(next_package).await + }, + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.process_broadcast_package(next_package).await + }, + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source.process_broadcast_package(next_package).await + }, + } } } } From 6b9d899e22b9a444f4809cc431b4f41efbef4fd3 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 14 Aug 2025 13:42:43 +0200 Subject: [PATCH 049/184] Move continuous tx broadcast processing loop to `ChainSource` Rather than looping in the `spawn` method directly, we move the loop to a refactored `continuously_process_broadcast_queue` method on `ChainSource`, which also allows us to react on the stop signal if we're polling `recv`. 
--- src/chain/mod.rs | 40 +++++++++++++++++++++++++++------------- src/lib.rs | 21 ++------------------- 2 files changed, 29 insertions(+), 32 deletions(-) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index db0573d3d..a4ab2c76b 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -18,7 +18,7 @@ use crate::config::{ }; use crate::fee_estimator::OnchainFeeEstimator; use crate::io::utils::write_node_metrics; -use crate::logger::{log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_debug, log_info, log_trace, LdkLogger, Logger}; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -425,19 +425,33 @@ impl ChainSource { } } - pub(crate) async fn process_broadcast_queue(&self) { + pub(crate) async fn continuously_process_broadcast_queue( + &self, mut stop_tx_bcast_receiver: tokio::sync::watch::Receiver<()>, + ) { let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - match &self.kind { - ChainSourceKind::Esplora(esplora_chain_source) => { - esplora_chain_source.process_broadcast_package(next_package).await - }, - ChainSourceKind::Electrum(electrum_chain_source) => { - electrum_chain_source.process_broadcast_package(next_package).await - }, - ChainSourceKind::Bitcoind(bitcoind_chain_source) => { - bitcoind_chain_source.process_broadcast_package(next_package).await - }, + loop { + let tx_bcast_logger = Arc::clone(&self.logger); + tokio::select! 
{ + _ = stop_tx_bcast_receiver.changed() => { + log_debug!( + tx_bcast_logger, + "Stopping broadcasting transactions.", + ); + return; + } + Some(next_package) = receiver.recv() => { + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.process_broadcast_package(next_package).await + }, + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.process_broadcast_package(next_package).await + }, + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source.process_broadcast_package(next_package).await + }, + } + } } } } diff --git a/src/lib.rs b/src/lib.rs index 89a17ab03..4ebc30b9b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -498,27 +498,10 @@ impl Node { }); } - let mut stop_tx_bcast = self.stop_sender.subscribe(); + let stop_tx_bcast = self.stop_sender.subscribe(); let chain_source = Arc::clone(&self.chain_source); - let tx_bcast_logger = Arc::clone(&self.logger); runtime.spawn(async move { - // Every second we try to clear our broadcasting queue. - let mut interval = tokio::time::interval(Duration::from_secs(1)); - interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! { - _ = stop_tx_bcast.changed() => { - log_debug!( - tx_bcast_logger, - "Stopping broadcasting transactions.", - ); - return; - } - _ = interval.tick() => { - chain_source.process_broadcast_queue().await; - } - } - } + chain_source.continuously_process_broadcast_queue(stop_tx_bcast).await }); let bump_tx_event_handler = Arc::new(BumpTransactionEventHandler::new( From 17a45dd18bcca217e32ef9c98483e4f4fed91f34 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 14 Aug 2025 14:23:59 +0200 Subject: [PATCH 050/184] Wait on all background tasks to finish (or abort) Previously, we'd only wait for the background processor tasks to successfully finish. It turned out that this could lead to races when the other background tasks took too long to shutdown. 
Here, we attempt to wait on all background tasks shutting down for a bit, before moving on. --- src/builder.rs | 4 ++ src/config.rs | 3 ++ src/lib.rs | 143 +++++++++++++++++++++++++++++++++++++------------ 3 files changed, 117 insertions(+), 33 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 85ec70d18..5ead3783b 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1632,11 +1632,15 @@ fn build_with_store_internal( let (stop_sender, _) = tokio::sync::watch::channel(()); let background_processor_task = Mutex::new(None); + let background_tasks = Mutex::new(None); + let cancellable_background_tasks = Mutex::new(None); Ok(Node { runtime, stop_sender, background_processor_task, + background_tasks, + cancellable_background_tasks, config, wallet, chain_source, diff --git a/src/config.rs b/src/config.rs index a5048e64f..02df8bbc7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -79,6 +79,9 @@ pub(crate) const LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 10; // The timeout after which we give up waiting on LDK's event handler to exit on shutdown. pub(crate) const LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS: u64 = 30; +// The timeout after which we give up waiting on a background task to exit on shutdown. +pub(crate) const BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS: u64 = 5; + // The timeout after which we abort a fee rate cache update operation. 
pub(crate) const FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 5; diff --git a/src/lib.rs b/src/lib.rs index 4ebc30b9b..a3cce0752 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -127,8 +127,8 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ default_user_config, may_announce_channel, ChannelConfig, Config, - LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, - RGS_SYNC_INTERVAL, + BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, + NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; @@ -179,6 +179,8 @@ pub struct Node { runtime: Arc>>>, stop_sender: tokio::sync::watch::Sender<()>, background_processor_task: Mutex>>, + background_tasks: Mutex>>, + cancellable_background_tasks: Mutex>>, config: Arc, wallet: Arc, chain_source: Arc, @@ -232,6 +234,10 @@ impl Node { return Err(Error::AlreadyRunning); } + let mut background_tasks = tokio::task::JoinSet::new(); + let mut cancellable_background_tasks = tokio::task::JoinSet::new(); + let runtime_handle = runtime.handle(); + log_info!( self.logger, "Starting up LDK Node with node ID {} on network: {}", @@ -258,11 +264,19 @@ impl Node { let sync_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - runtime.spawn(async move { - chain_source - .continuously_sync_wallets(stop_sync_receiver, sync_cman, sync_cmon, sync_sweeper) - .await; - }); + background_tasks.spawn_on( + async move { + chain_source + .continuously_sync_wallets( + stop_sync_receiver, + sync_cman, + sync_cmon, + sync_sweeper, + ) + .await; + }, + runtime_handle, + ); if self.gossip_source.is_rgs() { let gossip_source = Arc::clone(&self.gossip_source); @@ -270,7 +284,7 @@ impl Node { let gossip_sync_logger = Arc::clone(&self.logger); let gossip_node_metrics = 
Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); - runtime.spawn(async move { + cancellable_background_tasks.spawn_on(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); loop { tokio::select! { @@ -311,7 +325,7 @@ impl Node { } } } - }); + }, runtime_handle); } if let Some(listening_addresses) = &self.config.listening_addresses { @@ -337,7 +351,7 @@ impl Node { bind_addrs.extend(resolved_address); } - runtime.spawn(async move { + cancellable_background_tasks.spawn_on(async move { { let listener = tokio::net::TcpListener::bind(&*bind_addrs).await @@ -356,7 +370,7 @@ impl Node { _ = stop_listen.changed() => { log_debug!( listening_logger, - "Stopping listening to inbound connections.", + "Stopping listening to inbound connections." ); break; } @@ -375,7 +389,7 @@ impl Node { } listening_indicator.store(false, Ordering::Release); - }); + }, runtime_handle); } // Regularly reconnect to persisted peers. @@ -384,7 +398,7 @@ impl Node { let connect_logger = Arc::clone(&self.logger); let connect_peer_store = Arc::clone(&self.peer_store); let mut stop_connect = self.stop_sender.subscribe(); - runtime.spawn(async move { + cancellable_background_tasks.spawn_on(async move { let mut interval = tokio::time::interval(PEER_RECONNECTION_INTERVAL); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); loop { @@ -392,7 +406,7 @@ impl Node { _ = stop_connect.changed() => { log_debug!( connect_logger, - "Stopping reconnecting known peers.", + "Stopping reconnecting known peers." ); return; } @@ -412,7 +426,7 @@ impl Node { } } } - }); + }, runtime_handle); // Regularly broadcast node announcements. 
let bcast_cm = Arc::clone(&self.channel_manager); @@ -424,7 +438,7 @@ impl Node { let mut stop_bcast = self.stop_sender.subscribe(); let node_alias = self.config.node_alias.clone(); if may_announce_channel(&self.config).is_ok() { - runtime.spawn(async move { + cancellable_background_tasks.spawn_on(async move { // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. #[cfg(not(test))] let mut interval = tokio::time::interval(Duration::from_secs(30)); @@ -495,14 +509,15 @@ impl Node { } } } - }); + }, runtime_handle); } let stop_tx_bcast = self.stop_sender.subscribe(); let chain_source = Arc::clone(&self.chain_source); - runtime.spawn(async move { - chain_source.continuously_process_broadcast_queue(stop_tx_bcast).await - }); + cancellable_background_tasks.spawn_on( + async move { chain_source.continuously_process_broadcast_queue(stop_tx_bcast).await }, + runtime_handle, + ); let bump_tx_event_handler = Arc::new(BumpTransactionEventHandler::new( Arc::clone(&self.tx_broadcaster), @@ -587,24 +602,33 @@ impl Node { let mut stop_liquidity_handler = self.stop_sender.subscribe(); let liquidity_handler = Arc::clone(&liquidity_source); let liquidity_logger = Arc::clone(&self.logger); - runtime.spawn(async move { - loop { - tokio::select! { - _ = stop_liquidity_handler.changed() => { - log_debug!( - liquidity_logger, - "Stopping processing liquidity events.", - ); - return; + background_tasks.spawn_on( + async move { + loop { + tokio::select! 
{ + _ = stop_liquidity_handler.changed() => { + log_debug!( + liquidity_logger, + "Stopping processing liquidity events.", + ); + return; + } + _ = liquidity_handler.handle_next_event() => {} } - _ = liquidity_handler.handle_next_event() => {} } - } - }); + }, + runtime_handle, + ); } *runtime_lock = Some(runtime); + debug_assert!(self.background_tasks.lock().unwrap().is_none()); + *self.background_tasks.lock().unwrap() = Some(background_tasks); + + debug_assert!(self.cancellable_background_tasks.lock().unwrap().is_none()); + *self.cancellable_background_tasks.lock().unwrap() = Some(cancellable_background_tasks); + log_info!(self.logger, "Startup complete."); Ok(()) } @@ -635,6 +659,17 @@ impl Node { }, } + // Cancel cancellable background tasks + if let Some(mut tasks) = self.cancellable_background_tasks.lock().unwrap().take() { + let runtime_2 = Arc::clone(&runtime); + tasks.abort_all(); + tokio::task::block_in_place(move || { + runtime_2.block_on(async { while let Some(_) = tasks.join_next().await {} }) + }); + } else { + debug_assert!(false, "Expected some cancellable background tasks"); + }; + // Disconnect all peers. self.peer_manager.disconnect_all_peers(); log_debug!(self.logger, "Disconnected all network peers."); @@ -643,6 +678,46 @@ impl Node { self.chain_source.stop(); log_debug!(self.logger, "Stopped chain sources."); + // Wait until non-cancellable background tasks (mod LDK's background processor) are done. 
+ let runtime_3 = Arc::clone(&runtime); + if let Some(mut tasks) = self.background_tasks.lock().unwrap().take() { + tokio::task::block_in_place(move || { + runtime_3.block_on(async { + loop { + let timeout_fut = tokio::time::timeout( + Duration::from_secs(BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS), + tasks.join_next_with_id(), + ); + match timeout_fut.await { + Ok(Some(Ok((id, _)))) => { + log_trace!(self.logger, "Stopped background task with id {}", id); + }, + Ok(Some(Err(e))) => { + tasks.abort_all(); + log_trace!(self.logger, "Stopping background task failed: {}", e); + break; + }, + Ok(None) => { + log_debug!(self.logger, "Stopped all background tasks"); + break; + }, + Err(e) => { + tasks.abort_all(); + log_error!( + self.logger, + "Stopping background task timed out: {}", + e + ); + break; + }, + } + } + }) + }); + } else { + debug_assert!(false, "Expected some background tasks"); + }; + // Wait until background processing stopped, at least until a timeout is reached. if let Some(background_processor_task) = self.background_processor_task.lock().unwrap().take() @@ -676,7 +751,9 @@ impl Node { log_error!(self.logger, "Stopping event handling timed out: {}", e); }, } - } + } else { + debug_assert!(false, "Expected a background processing task"); + }; #[cfg(tokio_unstable)] { From f9d65df9e6663617f07305cc728aa3ab63346d08 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 14 Aug 2025 18:38:57 +0200 Subject: [PATCH 051/184] Update CHANGELOG for v0.6.2 --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe613a07b..05813b621 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +# 0.6.2 - Aug. 14, 2025 +This patch release fixes a panic that could have been hit when syncing to a +TLS-enabled Electrum server, as well as some minor issues when shutting down +the node. 
+ +## Bug Fixes and Improvements +- If not set by the user, we now install a default `CryptoProvider` for the + `rustls` TLS library. This fixes an issue that would have the node panic + whenever it first tries to access an Electrum server behind an `ssl://` + address. (#600) +- We improved robustness of the shutdown procedure. In particular, we now + wait for more background tasks to finish processing before shutting down + LDK background processing. Previously some tasks were kept running which + could have led to race conditions. (#613) + +In total, this release features 12 files changed, 198 insertions, 92 +deletions in 13 commits from 2 authors in alphabetical order: + +- Elias Rohrer +- moisesPomilio + # 0.6.1 - Jun. 19, 2025 This patch release fixes minor issues with the recently-exposed `Bolt11Invoice` type in bindings. From 80a5abcef8f45c61eaa2ef3a172ae48704068fa3 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 15 Aug 2025 11:05:44 +0200 Subject: [PATCH 052/184] Update Swift files for v0.6.2 --- Package.swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Package.swift b/Package.swift index 78a38f294..00f3eeb84 100644 --- a/Package.swift +++ b/Package.swift @@ -3,8 +3,8 @@ import PackageDescription -let tag = "v0.6.1" -let checksum = "73f53b615d5bfdf76f2e7233bde17a2a62631292ce506763a7150344230859c8" +let tag = "v0.6.2" +let checksum = "dee28eb2bc019eeb61cc28ca5c19fdada465a6eb2b5169d2dbaa369f0c63ba03" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( From 6e938fefe393f36eebefbab66af649faa5a1c38d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 15 Aug 2025 11:08:48 +0200 Subject: [PATCH 053/184] Drop nexus publishing plugin Maven Central recently deprecated the Sonatype-style publishing, which means the nexus publishing gradle plugin we used didn't work anymore.
As Maven Central has yet to release a replacement plugin for gradle, we simply drop nexus publishing support here and manually upload the archives in the meantime, which is simple enough. We also drop the publishing CI jobs that originally aimed to automate publishing to Maven Central, which we however never came around to use since we didn't want to fully trust Github CI with publishing binaries for us. --- .github/workflows/publish-android.yml | 43 ---------- .github/workflows/publish-jvm.yml | 86 ------------------- .../kotlin/ldk-node-android/build.gradle.kts | 20 +---- bindings/kotlin/ldk-node-jvm/build.gradle.kts | 21 +---- 4 files changed, 2 insertions(+), 168 deletions(-) delete mode 100644 .github/workflows/publish-android.yml delete mode 100644 .github/workflows/publish-jvm.yml diff --git a/.github/workflows/publish-android.yml b/.github/workflows/publish-android.yml deleted file mode 100644 index b6b24ac90..000000000 --- a/.github/workflows/publish-android.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: Publish ldk-node-android to Maven Central -on: [workflow_dispatch] - -jobs: - build: - runs-on: ubuntu-20.04 - steps: - - name: "Check out PR branch" - uses: actions/checkout@v2 - - - name: "Cache" - uses: actions/cache@v2 - with: - path: | - ~/.cargo/registry - ~/.cargo/git - ./target - key: ${{ runner.os }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} - - - name: "Set up JDK" - uses: actions/setup-java@v2 - with: - distribution: temurin - java-version: 11 - - - name: "Install Rust Android targets" - run: rustup target add x86_64-linux-android aarch64-linux-android armv7-linux-androideabi - - - name: "Build ldk-node-android library" - run: | - export PATH=$PATH:$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin - ./scripts/uniffi_bindgen_generate_kotlin_android.sh - - - name: "Publish to Maven Local and Maven Central" - env: - ORG_GRADLE_PROJECT_signingKeyId: ${{ secrets.PGP_KEY_ID }} - ORG_GRADLE_PROJECT_signingKey: ${{ secrets.PGP_SECRET_KEY }} 
- ORG_GRADLE_PROJECT_signingPassword: ${{ secrets.PGP_PASSPHRASE }} - ORG_GRADLE_PROJECT_ossrhUsername: ${{ secrets.NEXUS_USERNAME }} - ORG_GRADLE_PROJECT_ossrhPassword: ${{ secrets.NEXUS_PASSWORD }} - run: | - cd bindings/kotlin/ldk-node-android - ./gradlew publishToSonatype closeAndReleaseSonatypeStagingRepository diff --git a/.github/workflows/publish-jvm.yml b/.github/workflows/publish-jvm.yml deleted file mode 100644 index 0ae40e0a1..000000000 --- a/.github/workflows/publish-jvm.yml +++ /dev/null @@ -1,86 +0,0 @@ -name: Publish ldk-node-jvm to Maven Central -on: [workflow_dispatch] - -jobs: - build-jvm-macOS-M1-native-lib: - name: "Create M1 and x86_64 JVM native binaries" - runs-on: macos-12 - steps: - - name: "Checkout publishing branch" - uses: actions/checkout@v2 - - - name: Cache - uses: actions/cache@v3 - with: - path: | - ~/.cargo/registry - ~/.cargo/git - ./target - key: ${{ runner.os }}-${{ hashFiles('**/Cargo.toml','**/Cargo.lock') }} - - - name: Set up JDK - uses: actions/setup-java@v2 - with: - distribution: temurin - java-version: 11 - - - name: Install aarch64 Rust target - run: rustup target add aarch64-apple-darwin - - - name: Build ldk-node-jvm library - run: | - ./scripts/uniffi_bindgen_generate_kotlin.sh - - # build aarch64 + x86_64 native libraries and upload - - name: Upload macOS native libraries for reuse in publishing job - uses: actions/upload-artifact@v3 - with: - # name: no name is required because we upload the entire directory - # the default name "artifact" will be used - path: /Users/runner/work/ldk-node/ldk-node/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/ - - build-jvm-full-library: - name: "Create full ldk-node-jvm library" - needs: [build-jvm-macOS-M1-native-lib] - runs-on: ubuntu-20.04 - steps: - - name: "Check out PR branch" - uses: actions/checkout@v2 - - - name: "Cache" - uses: actions/cache@v2 - with: - path: | - ~/.cargo/registry - ~/.cargo/git - ./target - key: ${{ runner.os }}-${{ 
hashFiles('**/Cargo.toml','**/Cargo.lock') }} - - - name: "Set up JDK" - uses: actions/setup-java@v2 - with: - distribution: temurin - java-version: 11 - - - name: "Build ldk-node-jvm library" - run: | - ./scripts/uniffi_bindgen_generate_kotlin.sh - - - name: Download macOS native libraries from previous job - uses: actions/download-artifact@v4.1.7 - id: download - with: - # download the artifact created in the prior job (named "artifact") - name: artifact - path: ./bindings/kotlin/ldk-node-jvm/lib/src/main/resources/ - - - name: "Publish to Maven Local and Maven Central" - env: - ORG_GRADLE_PROJECT_signingKeyId: ${{ secrets.PGP_KEY_ID }} - ORG_GRADLE_PROJECT_signingKey: ${{ secrets.PGP_SECRET_KEY }} - ORG_GRADLE_PROJECT_signingPassword: ${{ secrets.PGP_PASSPHRASE }} - ORG_GRADLE_PROJECT_ossrhUsername: ${{ secrets.NEXUS_USERNAME }} - ORG_GRADLE_PROJECT_ossrhPassword: ${{ secrets.NEXUS_PASSWORD }} - run: | - cd bindings/kotlin/ldk-node-jvm - ./gradlew publishToSonatype closeAndReleaseSonatypeStagingRepository diff --git a/bindings/kotlin/ldk-node-android/build.gradle.kts b/bindings/kotlin/ldk-node-android/build.gradle.kts index ab7262dd7..bb38991d3 100644 --- a/bindings/kotlin/ldk-node-android/build.gradle.kts +++ b/bindings/kotlin/ldk-node-android/build.gradle.kts @@ -1,6 +1,7 @@ buildscript { repositories { google() + mavenCentral() } dependencies { classpath("com.android.tools.build:gradle:7.1.2") @@ -8,29 +9,10 @@ buildscript { } plugins { - id("io.github.gradle-nexus.publish-plugin") version "1.1.0" } // library version is defined in gradle.properties val libraryVersion: String by project -// These properties are required here so that the nexus publish-plugin -// finds a staging profile with the correct group (group is otherwise set as "") -// and knows whether to publish to a SNAPSHOT repository or not -// https://github.com/gradle-nexus/publish-plugin#applying-the-plugin group = "org.lightningdevkit" version = libraryVersion - -nexusPublishing { - 
repositories { - create("sonatype") { - nexusUrl.set(uri("https://s01.oss.sonatype.org/service/local/")) - snapshotRepositoryUrl.set(uri("https://s01.oss.sonatype.org/content/repositories/snapshots/")) - - val ossrhUsername: String? by project - val ossrhPassword: String? by project - username.set(ossrhUsername) - password.set(ossrhPassword) - } - } -} \ No newline at end of file diff --git a/bindings/kotlin/ldk-node-jvm/build.gradle.kts b/bindings/kotlin/ldk-node-jvm/build.gradle.kts index ab7262dd7..faf316ef0 100644 --- a/bindings/kotlin/ldk-node-jvm/build.gradle.kts +++ b/bindings/kotlin/ldk-node-jvm/build.gradle.kts @@ -1,36 +1,17 @@ buildscript { repositories { google() + mavenCentral() } dependencies { - classpath("com.android.tools.build:gradle:7.1.2") } } plugins { - id("io.github.gradle-nexus.publish-plugin") version "1.1.0" } // library version is defined in gradle.properties val libraryVersion: String by project -// These properties are required here so that the nexus publish-plugin -// finds a staging profile with the correct group (group is otherwise set as "") -// and knows whether to publish to a SNAPSHOT repository or not -// https://github.com/gradle-nexus/publish-plugin#applying-the-plugin group = "org.lightningdevkit" version = libraryVersion - -nexusPublishing { - repositories { - create("sonatype") { - nexusUrl.set(uri("https://s01.oss.sonatype.org/service/local/")) - snapshotRepositoryUrl.set(uri("https://s01.oss.sonatype.org/content/repositories/snapshots/")) - - val ossrhUsername: String? by project - val ossrhPassword: String? by project - username.set(ossrhUsername) - password.set(ossrhPassword) - } - } -} \ No newline at end of file From 89bdacb59171a103c46d47c56818df2d492f5ede Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 15 Aug 2025 11:12:09 +0200 Subject: [PATCH 054/184] Add simpler helper script to generate MD5 and SHA checksum files .. which we use before manually publishing to Maven Central. 
--- scripts/generate_checksum_files.sh | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 scripts/generate_checksum_files.sh diff --git a/scripts/generate_checksum_files.sh b/scripts/generate_checksum_files.sh new file mode 100644 index 000000000..bbfa41a9a --- /dev/null +++ b/scripts/generate_checksum_files.sh @@ -0,0 +1,5 @@ +#!/bin/bash +md5sum $1 | cut -d ' ' -f 1 > $1.md5 +sha1sum $1 | cut -d ' ' -f 1 > $1.sha1 +sha256sum $1 | cut -d ' ' -f 1 > $1.sha256 +sha512sum $1 | cut -d ' ' -f 1 > $1.sha512 From e57927a914b3872606e6167cb3e19cbfb6a59208 Mon Sep 17 00:00:00 2001 From: moisesPomilio <93723302+moisesPompilio@users.noreply.github.com> Date: Fri, 15 Aug 2025 09:59:49 -0300 Subject: [PATCH 055/184] Add reorg property test and refactor funding logic - Introduced a property test to verify reorg handling during both channel opening and closing (normal and force close). - Added `invalidate_block` helper to roll back the chain and regenerate blocks to the previous height. --- tests/common/mod.rs | 38 +++++++-- tests/reorg_test.rs | 193 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 226 insertions(+), 5 deletions(-) create mode 100644 tests/reorg_test.rs diff --git a/tests/common/mod.rs b/tests/common/mod.rs index daed86475..ab66f0fdd 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -40,7 +40,9 @@ use electrum_client::ElectrumApi; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; +use serde_json::{json, Value}; +use std::collections::HashMap; use std::env; use std::path::PathBuf; use std::sync::{Arc, RwLock}; @@ -395,6 +397,21 @@ pub(crate) fn generate_blocks_and_wait( println!("\n"); } +pub(crate) fn invalidate_blocks(bitcoind: &BitcoindClient, num_blocks: usize) { + let blockchain_info = bitcoind.get_blockchain_info().expect("failed to get blockchain info"); + let cur_height = blockchain_info.blocks as usize; + let target_height = cur_height - num_blocks + 1; + let block_hash = bitcoind + 
.get_block_hash(target_height as u64) + .expect("failed to get block hash") + .block_hash() + .expect("block hash should be present"); + bitcoind.invalidate_block(block_hash).expect("failed to invalidate block"); + let blockchain_info = bitcoind.get_blockchain_info().expect("failed to get blockchain info"); + let new_cur_height = blockchain_info.blocks as usize; + assert!(new_cur_height + num_blocks == cur_height); +} + pub(crate) fn wait_for_block(electrs: &E, min_height: usize) { let mut header = match electrs.block_headers_subscribe() { Ok(header) => header, @@ -474,18 +491,27 @@ pub(crate) fn premine_and_distribute_funds( let _ = bitcoind.load_wallet("ldk_node_test"); generate_blocks_and_wait(bitcoind, electrs, 101); - for addr in addrs { - let txid = bitcoind.send_to_address(&addr, amount).unwrap().0.parse().unwrap(); - wait_for_tx(electrs, txid); - } + let amounts: HashMap = + addrs.iter().map(|addr| (addr.to_string(), amount.to_btc())).collect(); + + let empty_account = json!(""); + let amounts_json = json!(amounts); + let txid = bitcoind + .call::("sendmany", &[empty_account, amounts_json]) + .unwrap() + .as_str() + .unwrap() + .parse() + .unwrap(); + wait_for_tx(electrs, txid); generate_blocks_and_wait(bitcoind, electrs, 1); } pub fn open_channel( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, should_announce: bool, electrsd: &ElectrsD, -) { +) -> OutPoint { if should_announce { node_a .open_announced_channel( @@ -513,6 +539,8 @@ pub fn open_channel( let funding_txo_b = expect_channel_pending_event!(node_b, node_a.node_id()); assert_eq!(funding_txo_a, funding_txo_b); wait_for_tx(&electrsd.client, funding_txo_a.txid); + + funding_txo_a } pub(crate) fn do_channel_full_cycle( diff --git a/tests/reorg_test.rs b/tests/reorg_test.rs new file mode 100644 index 000000000..707b67e88 --- /dev/null +++ b/tests/reorg_test.rs @@ -0,0 +1,193 @@ +mod common; +use bitcoin::Amount; +use ldk_node::payment::{PaymentDirection, PaymentKind}; +use 
ldk_node::{Event, LightningBalance, PendingSweepBalance}; +use proptest::{prelude::prop, proptest}; +use std::collections::HashMap; + +use crate::common::{ + expect_event, generate_blocks_and_wait, invalidate_blocks, open_channel, + premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_node, + wait_for_outpoint_spend, TestChainSource, +}; + +proptest! { + #![proptest_config(proptest::test_runner::Config::with_cases(5))] + #[test] + fn reorg_test(reorg_depth in 1..=6usize, force_close in prop::bool::ANY) { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + + let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind); + let chain_source_electrsd = TestChainSource::Electrum(&electrsd); + let chain_source_esplora = TestChainSource::Esplora(&electrsd); + + macro_rules! config_node { + ($chain_source: expr, $anchor_channels: expr) => {{ + let config_a = random_config($anchor_channels); + let node = setup_node(&$chain_source, config_a, None); + node + }}; + } + let anchor_channels = true; + let nodes = vec![ + config_node!(chain_source_electrsd, anchor_channels), + config_node!(chain_source_bitcoind, anchor_channels), + config_node!(chain_source_esplora, anchor_channels), + ]; + + let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client); + macro_rules! reorg { + ($reorg_depth: expr) => {{ + invalidate_blocks(bitcoind, $reorg_depth); + generate_blocks_and_wait(bitcoind, electrs, $reorg_depth); + }}; + } + + let amount_sat = 2_100_000; + let addr_nodes = + nodes.iter().map(|node| node.onchain_payment().new_address().unwrap()).collect::>(); + premine_and_distribute_funds(bitcoind, electrs, addr_nodes, Amount::from_sat(amount_sat)); + + macro_rules! 
sync_wallets { + () => { + nodes.iter().for_each(|node| node.sync_wallets().unwrap()) + }; + } + sync_wallets!(); + nodes.iter().for_each(|node| { + assert_eq!(node.list_balances().spendable_onchain_balance_sats, amount_sat); + assert_eq!(node.list_balances().total_onchain_balance_sats, amount_sat); + }); + + + let mut nodes_funding_tx = HashMap::new(); + let funding_amount_sat = 2_000_000; + for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) { + let funding_txo = open_channel(node, next_node, funding_amount_sat, true, &electrsd); + nodes_funding_tx.insert(node.node_id(), funding_txo); + } + + generate_blocks_and_wait(bitcoind, electrs, 6); + sync_wallets!(); + + reorg!(reorg_depth); + sync_wallets!(); + + macro_rules! collect_channel_ready_events { + ($node:expr, $expected:expr) => {{ + let mut user_channels = HashMap::new(); + for _ in 0..$expected { + match $node.wait_next_event() { + Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => { + $node.event_handled().unwrap(); + user_channels.insert(counterparty_node_id, user_channel_id); + }, + other => panic!("Unexpected event: {:?}", other), + } + } + user_channels + }}; + } + + let mut node_channels_id = HashMap::new(); + for (i, node) in nodes.iter().enumerate() { + assert_eq!( + node + .list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound + && matches!(p.kind, PaymentKind::Onchain { .. 
})) + .len(), + 1 + ); + + let user_channels = collect_channel_ready_events!(node, 2); + let next_node = nodes.get((i + 1) % nodes.len()).unwrap(); + let prev_node = nodes.get((i + nodes.len() - 1) % nodes.len()).unwrap(); + + assert!(user_channels.get(&Some(next_node.node_id())) != None); + assert!(user_channels.get(&Some(prev_node.node_id())) != None); + + let user_channel_id = + user_channels.get(&Some(next_node.node_id())).expect("Missing user channel for node"); + node_channels_id.insert(node.node_id(), *user_channel_id); + } + + + for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) { + let user_channel_id = node_channels_id.get(&node.node_id()).expect("user channel id not exist"); + let funding = nodes_funding_tx.get(&node.node_id()).expect("Funding tx not exist"); + + if force_close { + node.force_close_channel(&user_channel_id, next_node.node_id(), None).unwrap(); + } else { + node.close_channel(&user_channel_id, next_node.node_id()).unwrap(); + } + + expect_event!(node, ChannelClosed); + expect_event!(next_node, ChannelClosed); + + wait_for_outpoint_spend(electrs, *funding); + } + + reorg!(reorg_depth); + sync_wallets!(); + + generate_blocks_and_wait(bitcoind, electrs, 1); + sync_wallets!(); + + if force_close { + nodes.iter().for_each(|node| { + node.sync_wallets().unwrap(); + // If there is no more balance, there is nothing to process here. + if node.list_balances().lightning_balances.len() < 1 { + return; + } + match node.list_balances().lightning_balances[0] { + LightningBalance::ClaimableAwaitingConfirmations { + confirmation_height, + .. 
+ } => { + let cur_height = node.status().current_best_block.height; + let blocks_to_go = confirmation_height - cur_height; + generate_blocks_and_wait(bitcoind, electrs, blocks_to_go as usize); + node.sync_wallets().unwrap(); + }, + _ => panic!("Unexpected balance state for node_hub!"), + } + + assert!(node.list_balances().lightning_balances.len() < 2); + assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0); + match node.list_balances().pending_balances_from_channel_closures[0] { + PendingSweepBalance::BroadcastAwaitingConfirmation { .. } => {}, + _ => panic!("Unexpected balance state!"), + } + + generate_blocks_and_wait(&bitcoind, electrs, 1); + node.sync_wallets().unwrap(); + assert!(node.list_balances().lightning_balances.len() < 2); + assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0); + match node.list_balances().pending_balances_from_channel_closures[0] { + PendingSweepBalance::AwaitingThresholdConfirmations { .. } => {}, + _ => panic!("Unexpected balance state!"), + } + }); + } + + generate_blocks_and_wait(bitcoind, electrs, 6); + sync_wallets!(); + + reorg!(reorg_depth); + sync_wallets!(); + + let fee_sat = 7000; + // Check balance after close channel + nodes.iter().for_each(|node| { + assert!(node.list_balances().spendable_onchain_balance_sats > amount_sat - fee_sat); + assert!(node.list_balances().spendable_onchain_balance_sats < amount_sat); + + assert_eq!(node.list_balances().total_anchor_channels_reserve_sats, 0); + assert!(node.list_balances().lightning_balances.is_empty()); + + assert_eq!(node.next_event(), None); + }); + } +} From 48790021ae3c6315e1d3bf36e3a457e151cb4105 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 19 May 2025 16:27:28 +0200 Subject: [PATCH 056/184] Introduce `Runtime` object allowing to detect outer runtime context Instead of holding an `Arc>>` and dealing with stuff like `tokio::task::block_in_place` at all callsites, we introduce a `Runtime` object that takes care of
the state transitions, and allows to detect and reuse an outer runtime context. We also adjust the `with_runtime` API to take a `tokio::runtime::Handle` rather than an `Arc`. --- bindings/ldk_node.udl | 7 +- src/builder.rs | 44 +++++++++++- src/chain/electrum.rs | 11 ++- src/chain/mod.rs | 3 +- src/event.rs | 69 +++++++++---------- src/gossip.rs | 33 +++------ src/lib.rs | 133 +++++++++++++++---------------------- src/liquidity.rs | 53 ++++++--------- src/payment/bolt11.rs | 87 ++++++++++++------------ src/payment/bolt12.rs | 24 ++++--- src/payment/onchain.rs | 14 ++-- src/payment/spontaneous.rs | 14 ++-- src/runtime.rs | 72 ++++++++++++++++++++ 13 files changed, 308 insertions(+), 256 deletions(-) create mode 100644 src/runtime.rs diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 26480ca4b..f3560ec09 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -64,7 +64,7 @@ dictionary LogRecord { [Trait, WithForeign] interface LogWriter { - void log(LogRecord record); + void log(LogRecord record); }; interface Builder { @@ -161,8 +161,8 @@ interface Node { [Enum] interface Bolt11InvoiceDescription { - Hash(string hash); - Direct(string description); + Hash(string hash); + Direct(string description); }; interface Bolt11Payment { @@ -335,6 +335,7 @@ enum BuildError { "InvalidListeningAddresses", "InvalidAnnouncementAddresses", "InvalidNodeAlias", + "RuntimeSetupFailed", "ReadFailed", "WriteFailed", "StoragePathAccessFailed", diff --git a/src/builder.rs b/src/builder.rs index 1152f18c3..729cefe1b 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -28,6 +28,7 @@ use crate::liquidity::{ use crate::logger::{log_error, log_info, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; use crate::peer_store::PeerStore; +use crate::runtime::Runtime; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter, @@ -168,6 
+169,8 @@ pub enum BuildError { InvalidAnnouncementAddresses, /// The provided alias is invalid. InvalidNodeAlias, + /// An attempt to setup a runtime has failed. + RuntimeSetupFailed, /// We failed to read data from the [`KVStore`]. /// /// [`KVStore`]: lightning::util::persist::KVStore @@ -205,6 +208,7 @@ impl fmt::Display for BuildError { Self::InvalidAnnouncementAddresses => { write!(f, "Given announcement addresses are invalid.") }, + Self::RuntimeSetupFailed => write!(f, "Failed to setup a runtime."), Self::ReadFailed => write!(f, "Failed to read from store."), Self::WriteFailed => write!(f, "Failed to write to store."), Self::StoragePathAccessFailed => write!(f, "Failed to access the given storage path."), @@ -236,6 +240,7 @@ pub struct NodeBuilder { gossip_source_config: Option, liquidity_source_config: Option, log_writer_config: Option, + runtime_handle: Option, } impl NodeBuilder { @@ -252,6 +257,7 @@ impl NodeBuilder { let gossip_source_config = None; let liquidity_source_config = None; let log_writer_config = None; + let runtime_handle = None; Self { config, entropy_source_config, @@ -259,9 +265,20 @@ impl NodeBuilder { gossip_source_config, liquidity_source_config, log_writer_config, + runtime_handle, } } + /// Configures the [`Node`] instance to (re-)use a specific `tokio` runtime. + /// + /// If not provided, the node will spawn its own runtime or reuse any outer runtime context it + /// can detect. + #[cfg_attr(feature = "uniffi", allow(dead_code))] + pub fn set_runtime(&mut self, runtime_handle: tokio::runtime::Handle) -> &mut Self { + self.runtime_handle = Some(runtime_handle); + self + } + /// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk. 
/// /// If the given file does not exist a new random seed file will be generated and @@ -650,6 +667,15 @@ impl NodeBuilder { ) -> Result { let logger = setup_logger(&self.log_writer_config, &self.config)?; + let runtime = if let Some(handle) = self.runtime_handle.as_ref() { + Arc::new(Runtime::with_handle(handle.clone())) + } else { + Arc::new(Runtime::new().map_err(|e| { + log_error!(logger, "Failed to setup tokio runtime: {}", e); + BuildError::RuntimeSetupFailed + })?) + }; + let seed_bytes = seed_bytes_from_config( &self.config, self.entropy_source_config.as_ref(), @@ -678,6 +704,7 @@ impl NodeBuilder { self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), seed_bytes, + runtime, logger, Arc::new(vss_store), ) @@ -687,6 +714,15 @@ impl NodeBuilder { pub fn build_with_store(&self, kv_store: Arc) -> Result { let logger = setup_logger(&self.log_writer_config, &self.config)?; + let runtime = if let Some(handle) = self.runtime_handle.as_ref() { + Arc::new(Runtime::with_handle(handle.clone())) + } else { + Arc::new(Runtime::new().map_err(|e| { + log_error!(logger, "Failed to setup tokio runtime: {}", e); + BuildError::RuntimeSetupFailed + })?) 
+ }; + let seed_bytes = seed_bytes_from_config( &self.config, self.entropy_source_config.as_ref(), @@ -700,6 +736,7 @@ impl NodeBuilder { self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), seed_bytes, + runtime, logger, kv_store, ) @@ -1049,7 +1086,7 @@ fn build_with_store_internal( config: Arc, chain_data_source_config: Option<&ChainDataSourceConfig>, gossip_source_config: Option<&GossipSourceConfig>, liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64], - logger: Arc, kv_store: Arc, + runtime: Arc, logger: Arc, kv_store: Arc, ) -> Result { optionally_install_rustls_cryptoprovider(); @@ -1241,8 +1278,6 @@ fn build_with_store_internal( }, }; - let runtime = Arc::new(RwLock::new(None)); - // Initialize the ChainMonitor let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( Some(Arc::clone(&chain_source)), @@ -1637,6 +1672,8 @@ fn build_with_store_internal( let background_tasks = Mutex::new(None); let cancellable_background_tasks = Mutex::new(None); + let is_running = Arc::new(RwLock::new(false)); + Ok(Node { runtime, stop_sender, @@ -1664,6 +1701,7 @@ fn build_with_store_internal( scorer, peer_store, payment_store, + is_running, is_listening, node_metrics, }) diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index abbb758dd..b6d37409b 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -18,6 +18,7 @@ use crate::fee_estimator::{ }; use crate::io::utils::write_node_metrics; use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::runtime::Runtime; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::NodeMetrics; @@ -86,7 +87,7 @@ impl ElectrumChainSource { } } - pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { + pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { self.electrum_runtime_status.write().unwrap().start( self.server_url.clone(), Arc::clone(&runtime), @@ -339,7 +340,7 @@ impl 
ElectrumRuntimeStatus { } pub(super) fn start( - &mut self, server_url: String, runtime: Arc, config: Arc, + &mut self, server_url: String, runtime: Arc, config: Arc, logger: Arc, ) -> Result<(), Error> { match self { @@ -403,15 +404,14 @@ struct ElectrumRuntimeClient { electrum_client: Arc, bdk_electrum_client: Arc>, tx_sync: Arc>>, - runtime: Arc, + runtime: Arc, config: Arc, logger: Arc, } impl ElectrumRuntimeClient { fn new( - server_url: String, runtime: Arc, config: Arc, - logger: Arc, + server_url: String, runtime: Arc, config: Arc, logger: Arc, ) -> Result { let electrum_config = ElectrumConfigBuilder::new() .retry(ELECTRUM_CLIENT_NUM_RETRIES) @@ -544,7 +544,6 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || electrum_client.transaction_broadcast(&tx)); - let timeout_fut = tokio::time::timeout(Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), spawn_fut); diff --git a/src/chain/mod.rs b/src/chain/mod.rs index a4ab2c76b..f3a29e984 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -19,6 +19,7 @@ use crate::config::{ use crate::fee_estimator::OnchainFeeEstimator; use crate::io::utils::write_node_metrics; use crate::logger::{log_debug, log_info, log_trace, LdkLogger, Logger}; +use crate::runtime::Runtime; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -185,7 +186,7 @@ impl ChainSource { Self { kind, tx_broadcaster, logger } } - pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { + pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { match &self.kind { ChainSourceKind::Electrum(electrum_chain_source) => { electrum_chain_source.start(runtime)? 
diff --git a/src/event.rs b/src/event.rs index 22848bec1..ae81f50e9 100644 --- a/src/event.rs +++ b/src/event.rs @@ -29,6 +29,8 @@ use crate::io::{ }; use crate::logger::{log_debug, log_error, log_info, LdkLogger}; +use crate::runtime::Runtime; + use lightning::events::bump_transaction::BumpTransactionEvent; use lightning::events::{ClosureReason, PaymentPurpose, ReplayEvent}; use lightning::events::{Event as LdkEvent, PaymentFailureReason}; @@ -53,7 +55,7 @@ use core::future::Future; use core::task::{Poll, Waker}; use std::collections::VecDeque; use std::ops::Deref; -use std::sync::{Arc, Condvar, Mutex, RwLock}; +use std::sync::{Arc, Condvar, Mutex}; use std::time::Duration; /// An event emitted by [`Node`], which should be handled by the user. @@ -451,7 +453,7 @@ where liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>, - runtime: Arc>>>, + runtime: Arc, logger: L, config: Arc, } @@ -466,8 +468,8 @@ where channel_manager: Arc, connection_manager: Arc>, output_sweeper: Arc, network_graph: Arc, liquidity_source: Option>>>, - payment_store: Arc, peer_store: Arc>, - runtime: Arc>>>, logger: L, config: Arc, + payment_store: Arc, peer_store: Arc>, runtime: Arc, + logger: L, config: Arc, ) -> Self { Self { event_queue, @@ -1049,17 +1051,14 @@ where let forwarding_channel_manager = self.channel_manager.clone(); let min = time_forwardable.as_millis() as u64; - let runtime_lock = self.runtime.read().unwrap(); - debug_assert!(runtime_lock.is_some()); + let future = async move { + let millis_to_sleep = thread_rng().gen_range(min..min * 5) as u64; + tokio::time::sleep(Duration::from_millis(millis_to_sleep)).await; - if let Some(runtime) = runtime_lock.as_ref() { - runtime.spawn(async move { - let millis_to_sleep = thread_rng().gen_range(min..min * 5) as u64; - tokio::time::sleep(Duration::from_millis(millis_to_sleep)).await; + forwarding_channel_manager.process_pending_htlc_forwards(); + }; - forwarding_channel_manager.process_pending_htlc_forwards(); - }); - } 
+ self.runtime.spawn(future); }, LdkEvent::SpendableOutputs { outputs, channel_id } => { match self.output_sweeper.track_spendable_outputs(outputs, channel_id, true, None) { @@ -1421,31 +1420,27 @@ where debug_assert!(false, "We currently don't handle BOLT12 invoices manually, so this event should never be emitted."); }, LdkEvent::ConnectionNeeded { node_id, addresses } => { - let runtime_lock = self.runtime.read().unwrap(); - debug_assert!(runtime_lock.is_some()); - - if let Some(runtime) = runtime_lock.as_ref() { - let spawn_logger = self.logger.clone(); - let spawn_cm = Arc::clone(&self.connection_manager); - runtime.spawn(async move { - for addr in &addresses { - match spawn_cm.connect_peer_if_necessary(node_id, addr.clone()).await { - Ok(()) => { - return; - }, - Err(e) => { - log_error!( - spawn_logger, - "Failed to establish connection to peer {}@{}: {}", - node_id, - addr, - e - ); - }, - } + let spawn_logger = self.logger.clone(); + let spawn_cm = Arc::clone(&self.connection_manager); + let future = async move { + for addr in &addresses { + match spawn_cm.connect_peer_if_necessary(node_id, addr.clone()).await { + Ok(()) => { + return; + }, + Err(e) => { + log_error!( + spawn_logger, + "Failed to establish connection to peer {}@{}: {}", + node_id, + addr, + e + ); + }, } - }); - } + } + }; + self.runtime.spawn(future); }, LdkEvent::BumpTransaction(bte) => { match bte { diff --git a/src/gossip.rs b/src/gossip.rs index a8a6e3831..1185f0718 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -7,7 +7,8 @@ use crate::chain::ChainSource; use crate::config::RGS_SYNC_TIMEOUT_SECS; -use crate::logger::{log_error, log_trace, LdkLogger, Logger}; +use crate::logger::{log_trace, LdkLogger, Logger}; +use crate::runtime::Runtime; use crate::types::{GossipSync, Graph, P2PGossipSync, PeerManager, RapidGossipSync, UtxoLookup}; use crate::Error; @@ -15,13 +16,12 @@ use lightning_block_sync::gossip::{FutureSpawner, GossipVerifier}; use std::future::Future; use 
std::sync::atomic::{AtomicU32, Ordering}; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::time::Duration; pub(crate) enum GossipSource { P2PNetwork { gossip_sync: Arc, - logger: Arc, }, RapidGossipSync { gossip_sync: Arc, @@ -38,7 +38,7 @@ impl GossipSource { None::>, Arc::clone(&logger), )); - Self::P2PNetwork { gossip_sync, logger } + Self::P2PNetwork { gossip_sync } } pub fn new_rgs( @@ -63,12 +63,12 @@ impl GossipSource { pub(crate) fn set_gossip_verifier( &self, chain_source: Arc, peer_manager: Arc, - runtime: Arc>>>, + runtime: Arc, ) { match self { - Self::P2PNetwork { gossip_sync, logger } => { + Self::P2PNetwork { gossip_sync } => { if let Some(utxo_source) = chain_source.as_utxo_source() { - let spawner = RuntimeSpawner::new(Arc::clone(&runtime), Arc::clone(&logger)); + let spawner = RuntimeSpawner::new(Arc::clone(&runtime)); let gossip_verifier = Arc::new(GossipVerifier::new( utxo_source, spawner, @@ -133,28 +133,17 @@ impl GossipSource { } pub(crate) struct RuntimeSpawner { - runtime: Arc>>>, - logger: Arc, + runtime: Arc, } impl RuntimeSpawner { - pub(crate) fn new( - runtime: Arc>>>, logger: Arc, - ) -> Self { - Self { runtime, logger } + pub(crate) fn new(runtime: Arc) -> Self { + Self { runtime } } } impl FutureSpawner for RuntimeSpawner { fn spawn + Send + 'static>(&self, future: T) { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { - log_error!(self.logger, "Tried spawing a future while the runtime wasn't available. This should never happen."); - debug_assert!(false, "Tried spawing a future while the runtime wasn't available. 
This should never happen."); - return; - } - - let runtime = rt_lock.as_ref().unwrap(); - runtime.spawn(future); + self.runtime.spawn(future); } } diff --git a/src/lib.rs b/src/lib.rs index a3cce0752..cc5e383a1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -94,6 +94,7 @@ pub mod logger; mod message_handler; pub mod payment; mod peer_store; +mod runtime; mod sweep; mod tx_broadcaster; mod types; @@ -105,6 +106,7 @@ pub use lightning; pub use lightning_invoice; pub use lightning_liquidity; pub use lightning_types; +pub use tokio; pub use vss_client; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; @@ -141,6 +143,7 @@ use payment::{ UnifiedQrPayment, }; use peer_store::{PeerInfo, PeerStore}; +use runtime::Runtime; use types::{ Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, @@ -176,7 +179,7 @@ uniffi::include_scaffolding!("ldk_node"); /// /// Needs to be initialized and instantiated through [`Builder::build`]. pub struct Node { - runtime: Arc>>>, + runtime: Arc, stop_sender: tokio::sync::watch::Sender<()>, background_processor_task: Mutex>>, background_tasks: Mutex>>, @@ -202,6 +205,7 @@ pub struct Node { scorer: Arc>, peer_store: Arc>>, payment_store: Arc, + is_running: Arc>, is_listening: Arc, node_metrics: Arc>, } @@ -210,33 +214,21 @@ impl Node { /// Starts the necessary background tasks, such as handling events coming from user input, /// LDK/BDK, and the peer-to-peer network. /// - /// After this returns, the [`Node`] instance can be controlled via the provided API methods in - /// a thread-safe manner. 
- pub fn start(&self) -> Result<(), Error> { - let runtime = - Arc::new(tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap()); - self.start_with_runtime(runtime) - } - - /// Starts the necessary background tasks (such as handling events coming from user input, - /// LDK/BDK, and the peer-to-peer network) on the the given `runtime`. - /// - /// This allows to have LDK Node reuse an outer pre-existing runtime, e.g., to avoid stacking Tokio - /// runtime contexts. + /// This will try to auto-detect an outer pre-existing runtime, e.g., to avoid stacking Tokio + /// runtime contexts. Note we require the outer runtime to be of the `multithreaded` flavor. /// /// After this returns, the [`Node`] instance can be controlled via the provided API methods in /// a thread-safe manner. - pub fn start_with_runtime(&self, runtime: Arc) -> Result<(), Error> { + pub fn start(&self) -> Result<(), Error> { // Acquire a run lock and hold it until we're setup. - let mut runtime_lock = self.runtime.write().unwrap(); - if runtime_lock.is_some() { - // We're already running. + let mut is_running_lock = self.is_running.write().unwrap(); + if *is_running_lock { return Err(Error::AlreadyRunning); } let mut background_tasks = tokio::task::JoinSet::new(); let mut cancellable_background_tasks = tokio::task::JoinSet::new(); - let runtime_handle = runtime.handle(); + let runtime_handle = self.runtime.handle(); log_info!( self.logger, @@ -246,17 +238,14 @@ impl Node { ); // Start up any runtime-dependant chain sources (e.g. 
Electrum) - self.chain_source.start(Arc::clone(&runtime)).map_err(|e| { + self.chain_source.start(Arc::clone(&self.runtime)).map_err(|e| { log_error!(self.logger, "Failed to start chain syncing: {}", e); e })?; // Block to ensure we update our fee rate cache once on startup let chain_source = Arc::clone(&self.chain_source); - let runtime_ref = &runtime; - tokio::task::block_in_place(move || { - runtime_ref.block_on(async move { chain_source.update_fee_rate_estimates().await }) - })?; + self.runtime.block_on(async move { chain_source.update_fee_rate_estimates().await })?; // Spawn background task continuously syncing onchain, lightning, and fee rate cache. let stop_sync_receiver = self.stop_sender.subscribe(); @@ -574,7 +563,7 @@ impl Node { }) }; - let handle = runtime.spawn(async move { + let handle = self.runtime.spawn(async move { process_events_async( background_persister, |e| background_event_handler.handle_event(e), @@ -621,8 +610,6 @@ impl Node { ); } - *runtime_lock = Some(runtime); - debug_assert!(self.background_tasks.lock().unwrap().is_none()); *self.background_tasks.lock().unwrap() = Some(background_tasks); @@ -630,6 +617,7 @@ impl Node { *self.cancellable_background_tasks.lock().unwrap() = Some(cancellable_background_tasks); log_info!(self.logger, "Startup complete."); + *is_running_lock = true; Ok(()) } @@ -637,9 +625,10 @@ impl Node { /// /// After this returns most API methods will return [`Error::NotRunning`]. 
pub fn stop(&self) -> Result<(), Error> { - let runtime = self.runtime.write().unwrap().take().ok_or(Error::NotRunning)?; - #[cfg(tokio_unstable)] - let metrics_runtime = Arc::clone(&runtime); + let mut is_running_lock = self.is_running.write().unwrap(); + if !*is_running_lock { + return Err(Error::NotRunning); + } log_info!(self.logger, "Shutting down LDK Node with node ID {}...", self.node_id()); @@ -661,10 +650,10 @@ impl Node { // Cancel cancellable background tasks if let Some(mut tasks) = self.cancellable_background_tasks.lock().unwrap().take() { - let runtime_2 = Arc::clone(&runtime); + let runtime_handle = self.runtime.handle(); tasks.abort_all(); tokio::task::block_in_place(move || { - runtime_2.block_on(async { while let Some(_) = tasks.join_next().await {} }) + runtime_handle.block_on(async { while let Some(_) = tasks.join_next().await {} }) }); } else { debug_assert!(false, "Expected some cancellable background tasks"); @@ -679,10 +668,10 @@ impl Node { log_debug!(self.logger, "Stopped chain sources."); // Wait until non-cancellable background tasks (mod LDK's background processor) are done. 
- let runtime_3 = Arc::clone(&runtime); + let runtime_handle = self.runtime.handle(); if let Some(mut tasks) = self.background_tasks.lock().unwrap().take() { tokio::task::block_in_place(move || { - runtime_3.block_on(async { + runtime_handle.block_on(async { loop { let timeout_fut = tokio::time::timeout( Duration::from_secs(BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS), @@ -724,7 +713,7 @@ impl Node { { let abort_handle = background_processor_task.abort_handle(); let timeout_res = tokio::task::block_in_place(move || { - runtime.block_on(async { + self.runtime.block_on(async { tokio::time::timeout( Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS), background_processor_task, @@ -757,20 +746,22 @@ impl Node { #[cfg(tokio_unstable)] { + let runtime_handle = self.runtime.handle(); log_trace!( self.logger, "Active runtime tasks left prior to shutdown: {}", - metrics_runtime.metrics().active_tasks_count() + runtime_handle.metrics().active_tasks_count() ); } log_info!(self.logger, "Shutdown complete."); + *is_running_lock = false; Ok(()) } /// Returns the status of the [`Node`]. 
pub fn status(&self) -> NodeStatus { - let is_running = self.runtime.read().unwrap().is_some(); + let is_running = *self.is_running.read().unwrap(); let is_listening = self.is_listening.load(Ordering::Acquire); let current_best_block = self.channel_manager.current_best_block().into(); let locked_node_metrics = self.node_metrics.read().unwrap(); @@ -891,6 +882,7 @@ impl Node { Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), ) } @@ -908,6 +900,7 @@ impl Node { Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), )) } @@ -918,9 +911,9 @@ impl Node { #[cfg(not(feature = "uniffi"))] pub fn bolt12_payment(&self) -> Bolt12Payment { Bolt12Payment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.is_running), Arc::clone(&self.logger), ) } @@ -931,9 +924,9 @@ impl Node { #[cfg(feature = "uniffi")] pub fn bolt12_payment(&self) -> Arc { Arc::new(Bolt12Payment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.is_running), Arc::clone(&self.logger), )) } @@ -942,11 +935,11 @@ impl Node { #[cfg(not(feature = "uniffi"))] pub fn spontaneous_payment(&self) -> SpontaneousPayment { SpontaneousPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.keys_manager), Arc::clone(&self.payment_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), ) } @@ -955,11 +948,11 @@ impl Node { #[cfg(feature = "uniffi")] pub fn spontaneous_payment(&self) -> Arc { Arc::new(SpontaneousPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.keys_manager), Arc::clone(&self.payment_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), 
Arc::clone(&self.logger), )) } @@ -968,10 +961,10 @@ impl Node { #[cfg(not(feature = "uniffi"))] pub fn onchain_payment(&self) -> OnchainPayment { OnchainPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.wallet), Arc::clone(&self.channel_manager), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), ) } @@ -980,10 +973,10 @@ impl Node { #[cfg(feature = "uniffi")] pub fn onchain_payment(&self) -> Arc { Arc::new(OnchainPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.wallet), Arc::clone(&self.channel_manager), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), )) } @@ -1061,11 +1054,9 @@ impl Node { pub fn connect( &self, node_id: PublicKey, address: SocketAddress, persist: bool, ) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let runtime = rt_lock.as_ref().unwrap(); let peer_info = PeerInfo { node_id, address }; @@ -1075,10 +1066,8 @@ impl Node { // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; log_info!(self.logger, "Connected to peer {}@{}. ", peer_info.node_id, peer_info.address); @@ -1095,8 +1084,7 @@ impl Node { /// Will also remove the peer from the peer store, i.e., after this has been called we won't /// try to reconnect on restart. 
pub fn disconnect(&self, counterparty_node_id: PublicKey) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } @@ -1118,11 +1106,9 @@ impl Node { push_to_counterparty_msat: Option, channel_config: Option, announce_for_forwarding: bool, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let runtime = rt_lock.as_ref().unwrap(); let peer_info = PeerInfo { node_id, address }; @@ -1146,10 +1132,8 @@ impl Node { // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; // Fail if we have less than the channel value + anchor reserve available (if applicable). 
@@ -1298,8 +1282,7 @@ impl Node { /// /// [`EsploraSyncConfig::background_sync_config`]: crate::config::EsploraSyncConfig::background_sync_config pub fn sync_wallets(&self) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } @@ -1307,24 +1290,16 @@ impl Node { let sync_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - tokio::task::block_in_place(move || { - tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( - async move { - if chain_source.is_transaction_based() { - chain_source.update_fee_rate_estimates().await?; - chain_source - .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper) - .await?; - chain_source.sync_onchain_wallet().await?; - } else { - chain_source.update_fee_rate_estimates().await?; - chain_source - .poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper) - .await?; - } - Ok(()) - }, - ) + self.runtime.block_on(async move { + if chain_source.is_transaction_based() { + chain_source.update_fee_rate_estimates().await?; + chain_source.sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper).await?; + chain_source.sync_onchain_wallet().await?; + } else { + chain_source.update_fee_rate_estimates().await?; + chain_source.poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper).await?; + } + Ok(()) }) } diff --git a/src/liquidity.rs b/src/liquidity.rs index a4516edd0..9b103ee82 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -10,6 +10,7 @@ use crate::chain::ChainSource; use crate::connection::ConnectionManager; use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; +use crate::runtime::Runtime; use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager, Wallet}; use crate::{total_anchor_channels_reserve_sats, Config, Error}; @@ -1388,7 +1389,7 @@ 
pub(crate) struct LSPS2BuyResponse { /// [`Bolt11Payment::receive_via_jit_channel`]: crate::payment::Bolt11Payment::receive_via_jit_channel #[derive(Clone)] pub struct LSPS1Liquidity { - runtime: Arc>>>, + runtime: Arc, wallet: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, @@ -1397,7 +1398,7 @@ pub struct LSPS1Liquidity { impl LSPS1Liquidity { pub(crate) fn new( - runtime: Arc>>>, wallet: Arc, + runtime: Arc, wallet: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, logger: Arc, ) -> Self { @@ -1418,19 +1419,14 @@ impl LSPS1Liquidity { let (lsp_node_id, lsp_address) = liquidity_source.get_lsps1_lsp_details().ok_or(Error::LiquiditySourceUnavailable)?; - let rt_lock = self.runtime.read().unwrap(); - let runtime = rt_lock.as_ref().unwrap(); - let con_node_id = lsp_node_id; let con_addr = lsp_address.clone(); let con_cm = Arc::clone(&self.connection_manager); // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; log_info!(self.logger, "Connected to LSP {}@{}. 
", lsp_node_id, lsp_address); @@ -1438,18 +1434,16 @@ impl LSPS1Liquidity { let refund_address = self.wallet.get_new_address()?; let liquidity_source = Arc::clone(&liquidity_source); - let response = tokio::task::block_in_place(move || { - runtime.block_on(async move { - liquidity_source - .lsps1_request_channel( - lsp_balance_sat, - client_balance_sat, - channel_expiry_blocks, - announce_channel, - refund_address, - ) - .await - }) + let response = self.runtime.block_on(async move { + liquidity_source + .lsps1_request_channel( + lsp_balance_sat, + client_balance_sat, + channel_expiry_blocks, + announce_channel, + refund_address, + ) + .await })?; Ok(response) @@ -1463,27 +1457,20 @@ impl LSPS1Liquidity { let (lsp_node_id, lsp_address) = liquidity_source.get_lsps1_lsp_details().ok_or(Error::LiquiditySourceUnavailable)?; - let rt_lock = self.runtime.read().unwrap(); - let runtime = rt_lock.as_ref().unwrap(); - let con_node_id = lsp_node_id; let con_addr = lsp_address.clone(); let con_cm = Arc::clone(&self.connection_manager); // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. 
- tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; let liquidity_source = Arc::clone(&liquidity_source); - let response = tokio::task::block_in_place(move || { - runtime - .block_on(async move { liquidity_source.lsps1_check_order_status(order_id).await }) - })?; - + let response = self + .runtime + .block_on(async move { liquidity_source.lsps1_check_order_status(order_id).await })?; Ok(response) } } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 817a428bd..389c818c8 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -22,6 +22,7 @@ use crate::payment::store::{ }; use crate::payment::SendingParameters; use crate::peer_store::{PeerInfo, PeerStore}; +use crate::runtime::Runtime; use crate::types::{ChannelManager, PaymentStore}; use lightning::ln::bolt11_payment; @@ -57,24 +58,24 @@ type Bolt11InvoiceDescription = crate::ffi::Bolt11InvoiceDescription; /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [`Node::bolt11_payment`]: crate::Node::bolt11_payment pub struct Bolt11Payment { - runtime: Arc>>>, + runtime: Arc, channel_manager: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>>, config: Arc, + is_running: Arc>, logger: Arc, } impl Bolt11Payment { pub(crate) fn new( - runtime: Arc>>>, - channel_manager: Arc, + runtime: Arc, channel_manager: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>>, - config: Arc, logger: Arc, + config: Arc, is_running: Arc>, logger: Arc, ) -> Self { Self { runtime, @@ -84,6 +85,7 @@ impl Bolt11Payment { payment_store, peer_store, config, + is_running, logger, } } @@ -95,12 +97,12 @@ impl Bolt11Payment { pub fn send( &self, invoice: &Bolt11Invoice, sending_parameters: Option, ) -> 
Result { - let invoice = maybe_deref(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + let invoice = maybe_deref(invoice); + let (payment_hash, recipient_onion, mut route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send payment due to the given invoice being \"zero-amount\". Please use send_using_amount instead."); Error::InvalidInvoice @@ -204,12 +206,12 @@ impl Bolt11Payment { &self, invoice: &Bolt11Invoice, amount_msat: u64, sending_parameters: Option, ) -> Result { - let invoice = maybe_deref(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + let invoice = maybe_deref(invoice); + if let Some(invoice_amount_msat) = invoice.amount_milli_satoshis() { if amount_msat < invoice_amount_msat { log_error!( @@ -619,9 +621,6 @@ impl Bolt11Payment { let (node_id, address) = liquidity_source.get_lsps2_lsp_details().ok_or(Error::LiquiditySourceUnavailable)?; - let rt_lock = self.runtime.read().unwrap(); - let runtime = rt_lock.as_ref().unwrap(); - let peer_info = PeerInfo { node_id, address }; let con_node_id = peer_info.node_id; @@ -630,39 +629,35 @@ impl Bolt11Payment { // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; log_info!(self.logger, "Connected to LSP {}@{}. 
", peer_info.node_id, peer_info.address); let liquidity_source = Arc::clone(&liquidity_source); let (invoice, lsp_total_opening_fee, lsp_prop_opening_fee) = - tokio::task::block_in_place(move || { - runtime.block_on(async move { - if let Some(amount_msat) = amount_msat { - liquidity_source - .lsps2_receive_to_jit_channel( - amount_msat, - description, - expiry_secs, - max_total_lsp_fee_limit_msat, - ) - .await - .map(|(invoice, total_fee)| (invoice, Some(total_fee), None)) - } else { - liquidity_source - .lsps2_receive_variable_amount_to_jit_channel( - description, - expiry_secs, - max_proportional_lsp_fee_limit_ppm_msat, - ) - .await - .map(|(invoice, prop_fee)| (invoice, None, Some(prop_fee))) - } - }) + self.runtime.block_on(async move { + if let Some(amount_msat) = amount_msat { + liquidity_source + .lsps2_receive_to_jit_channel( + amount_msat, + description, + expiry_secs, + max_total_lsp_fee_limit_msat, + ) + .await + .map(|(invoice, total_fee)| (invoice, Some(total_fee), None)) + } else { + liquidity_source + .lsps2_receive_variable_amount_to_jit_channel( + description, + expiry_secs, + max_proportional_lsp_fee_limit_ppm_msat, + ) + .await + .map(|(invoice, prop_fee)| (invoice, None, Some(prop_fee))) + } })?; // Register payment in payment store. @@ -712,12 +707,12 @@ impl Bolt11Payment { /// amount times [`Config::probing_liquidity_limit_multiplier`] won't be used to send /// pre-flight probes. pub fn send_probes(&self, invoice: &Bolt11Invoice) -> Result<(), Error> { - let invoice = maybe_deref(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + let invoice = maybe_deref(invoice); + let (_payment_hash, _recipient_onion, route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send probes due to the given invoice being \"zero-amount\". 
Please use send_probes_using_amount instead."); Error::InvalidInvoice @@ -745,12 +740,12 @@ impl Bolt11Payment { pub fn send_probes_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, ) -> Result<(), Error> { - let invoice = maybe_deref(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + let invoice = maybe_deref(invoice); + let (_payment_hash, _recipient_onion, route_params) = if let Some(invoice_amount_msat) = invoice.amount_milli_satoshis() { diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index b9efa3241..8e10b9f4f 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -49,19 +49,18 @@ type Refund = Arc; /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [`Node::bolt12_payment`]: crate::Node::bolt12_payment pub struct Bolt12Payment { - runtime: Arc>>>, channel_manager: Arc, payment_store: Arc, + is_running: Arc>, logger: Arc, } impl Bolt12Payment { pub(crate) fn new( - runtime: Arc>>>, channel_manager: Arc, payment_store: Arc, - logger: Arc, + is_running: Arc>, logger: Arc, ) -> Self { - Self { runtime, channel_manager, payment_store, logger } + Self { channel_manager, payment_store, is_running, logger } } /// Send a payment given an offer. 
@@ -73,11 +72,12 @@ impl Bolt12Payment { pub fn send( &self, offer: &Offer, quantity: Option, payer_note: Option, ) -> Result { - let offer = maybe_deref(offer); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + + let offer = maybe_deref(offer); + let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); @@ -175,12 +175,12 @@ impl Bolt12Payment { pub fn send_using_amount( &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, ) -> Result { - let offer = maybe_deref(offer); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + let offer = maybe_deref(offer); + let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); @@ -346,6 +346,10 @@ impl Bolt12Payment { /// [`Refund`]: lightning::offers::refund::Refund /// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice pub fn request_refund_payment(&self, refund: &Refund) -> Result { + if !*self.is_running.read().unwrap() { + return Err(Error::NotRunning); + } + let refund = maybe_deref(refund); let invoice = self.channel_manager.request_refund_payment(&refund).map_err(|e| { log_error!(self.logger, "Failed to request refund payment: {:?}", e); diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index 046d66c69..2614e55ce 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -41,19 +41,19 @@ macro_rules! 
maybe_map_fee_rate_opt { /// /// [`Node::onchain_payment`]: crate::Node::onchain_payment pub struct OnchainPayment { - runtime: Arc>>>, wallet: Arc, channel_manager: Arc, config: Arc, + is_running: Arc>, logger: Arc, } impl OnchainPayment { pub(crate) fn new( - runtime: Arc>>>, wallet: Arc, - channel_manager: Arc, config: Arc, logger: Arc, + wallet: Arc, channel_manager: Arc, config: Arc, + is_running: Arc>, logger: Arc, ) -> Self { - Self { runtime, wallet, channel_manager, config, logger } + Self { wallet, channel_manager, config, is_running, logger } } /// Retrieve a new on-chain/funding address. @@ -75,8 +75,7 @@ impl OnchainPayment { pub fn send_to_address( &self, address: &bitcoin::Address, amount_sats: u64, fee_rate: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } @@ -106,8 +105,7 @@ impl OnchainPayment { pub fn send_all_to_address( &self, address: &bitcoin::Address, retain_reserves: bool, fee_rate: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index a7e7876d7..3e48fd090 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -33,21 +33,21 @@ const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; /// /// [`Node::spontaneous_payment`]: crate::Node::spontaneous_payment pub struct SpontaneousPayment { - runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, payment_store: Arc, config: Arc, + is_running: Arc>, logger: Arc, } impl SpontaneousPayment { pub(crate) fn new( - runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, - payment_store: Arc, config: Arc, logger: Arc, + payment_store: Arc, config: Arc, is_running: Arc>, + logger: Arc, ) -> Self { - Self { runtime, channel_manager, keys_manager, payment_store, config, logger } + 
Self { channel_manager, keys_manager, payment_store, config, is_running, logger } } /// Send a spontaneous aka. "keysend", payment. @@ -88,8 +88,7 @@ impl SpontaneousPayment { &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, custom_tlvs: Option>, preimage: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } @@ -198,8 +197,7 @@ impl SpontaneousPayment { /// /// [`Bolt11Payment::send_probes`]: crate::payment::Bolt11Payment pub fn send_probes(&self, amount_msat: u64, node_id: PublicKey) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } diff --git a/src/runtime.rs b/src/runtime.rs new file mode 100644 index 000000000..4c1241165 --- /dev/null +++ b/src/runtime.rs @@ -0,0 +1,72 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use tokio::task::JoinHandle; + +use std::future::Future; + +pub(crate) struct Runtime { + mode: RuntimeMode, +} + +impl Runtime { + pub fn new() -> Result { + let mode = match tokio::runtime::Handle::try_current() { + Ok(handle) => RuntimeMode::Handle(handle), + Err(_) => { + let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?; + RuntimeMode::Owned(rt) + }, + }; + Ok(Self { mode }) + } + + pub fn with_handle(handle: tokio::runtime::Handle) -> Self { + let mode = RuntimeMode::Handle(handle); + Self { mode } + } + + pub fn spawn(&self, future: F) -> JoinHandle + where + F: Future + Send + 'static, + F::Output: Send + 'static, + { + let handle = self.handle(); + handle.spawn(future) + } + + pub fn spawn_blocking(&self, func: F) -> JoinHandle + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let handle = self.handle(); + handle.spawn_blocking(func) + } + + pub fn block_on(&self, future: F) -> F::Output { + // While we generally decided not to overthink via which call graph users would enter our + // runtime context, we'd still try to reuse whatever current context would be present + // during `block_on`, as this is the context `block_in_place` would operate on. So we try + // to detect the outer context here, and otherwise use whatever was set during + // initialization. 
+ let handle = tokio::runtime::Handle::try_current().unwrap_or(self.handle().clone()); + tokio::task::block_in_place(move || handle.block_on(future)) + } + + pub fn handle(&self) -> &tokio::runtime::Handle { + match &self.mode { + RuntimeMode::Owned(rt) => rt.handle(), + RuntimeMode::Handle(handle) => handle, + } + } +} + +enum RuntimeMode { + Owned(tokio::runtime::Runtime), + Handle(tokio::runtime::Handle), +} From 12bd254c02ff8565fde276fd073a7b288eb88be0 Mon Sep 17 00:00:00 2001 From: Andrei Date: Mon, 18 Aug 2025 00:00:00 +0000 Subject: [PATCH 057/184] Add corresponding `_for_hash` methods to receive via jit channel Add corresponding `receive_via_jit_channel_for_hash()` and `receive_variable_amount_via_jit_channel_for_hash()` methods that accept a custom payment hash from the user. These methods allow implementing swap-in functionality with a JIT channel (i.e., swapping an on-chain payment to Lightning by opening a new channel). --- bindings/ldk_node.udl | 4 ++ src/event.rs | 3 +- src/liquidity.rs | 46 ++++++++++---- src/payment/bolt11.rs | 98 +++++++++++++++++++++++++++- tests/integration_tests_rust.rs | 109 ++++++++++++++++++++++++++++++-- 5 files changed, 238 insertions(+), 22 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index f3560ec09..076d7fc9b 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -189,7 +189,11 @@ interface Bolt11Payment { [Throws=NodeError] Bolt11Invoice receive_via_jit_channel(u64 amount_msat, [ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? max_lsp_fee_limit_msat); [Throws=NodeError] + Bolt11Invoice receive_via_jit_channel_for_hash(u64 amount_msat, [ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? max_lsp_fee_limit_msat, PaymentHash payment_hash); + [Throws=NodeError] Bolt11Invoice receive_variable_amount_via_jit_channel([ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? 
max_proportional_lsp_fee_limit_ppm_msat); + [Throws=NodeError] + Bolt11Invoice receive_variable_amount_via_jit_channel_for_hash([ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? max_proportional_lsp_fee_limit_ppm_msat, PaymentHash payment_hash); }; interface Bolt12Payment { diff --git a/src/event.rs b/src/event.rs index ae81f50e9..883177d67 100644 --- a/src/event.rs +++ b/src/event.rs @@ -685,7 +685,8 @@ where // the payment has been registered via `_for_hash` variants and needs to be manually claimed via // user interaction. match info.kind { - PaymentKind::Bolt11 { preimage, .. } => { + PaymentKind::Bolt11 { preimage, .. } + | PaymentKind::Bolt11Jit { preimage, .. } => { if purpose.preimage().is_none() { debug_assert!( preimage.is_none(), diff --git a/src/liquidity.rs b/src/liquidity.rs index 9b103ee82..6ee8066c1 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -988,7 +988,7 @@ where pub(crate) async fn lsps2_receive_to_jit_channel( &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, - max_total_lsp_fee_limit_msat: Option, + max_total_lsp_fee_limit_msat: Option, payment_hash: Option, ) -> Result<(Bolt11Invoice, u64), Error> { let fee_response = self.lsps2_request_opening_fee_params().await?; @@ -1040,6 +1040,7 @@ where Some(amount_msat), description, expiry_secs, + payment_hash, )?; log_info!(self.logger, "JIT-channel invoice created: {}", invoice); @@ -1048,7 +1049,7 @@ where pub(crate) async fn lsps2_receive_variable_amount_to_jit_channel( &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, - max_proportional_lsp_fee_limit_ppm_msat: Option, + max_proportional_lsp_fee_limit_ppm_msat: Option, payment_hash: Option, ) -> Result<(Bolt11Invoice, u64), Error> { let fee_response = self.lsps2_request_opening_fee_params().await?; @@ -1082,8 +1083,13 @@ where ); let buy_response = self.lsps2_send_buy_request(None, min_opening_params).await?; - let invoice = - self.lsps2_create_jit_invoice(buy_response, 
None, description, expiry_secs)?; + let invoice = self.lsps2_create_jit_invoice( + buy_response, + None, + description, + expiry_secs, + payment_hash, + )?; log_info!(self.logger, "JIT-channel invoice created: {}", invoice); Ok((invoice, min_prop_fee_ppm_msat)) @@ -1166,18 +1172,36 @@ where fn lsps2_create_jit_invoice( &self, buy_response: LSPS2BuyResponse, amount_msat: Option, description: &Bolt11InvoiceDescription, expiry_secs: u32, + payment_hash: Option, ) -> Result { let lsps2_client = self.lsps2_client.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; // LSPS2 requires min_final_cltv_expiry_delta to be at least 2 more than usual. let min_final_cltv_expiry_delta = MIN_FINAL_CLTV_EXPIRY_DELTA + 2; - let (payment_hash, payment_secret) = self - .channel_manager - .create_inbound_payment(None, expiry_secs, Some(min_final_cltv_expiry_delta)) - .map_err(|e| { - log_error!(self.logger, "Failed to register inbound payment: {:?}", e); - Error::InvoiceCreationFailed - })?; + let (payment_hash, payment_secret) = match payment_hash { + Some(payment_hash) => { + let payment_secret = self + .channel_manager + .create_inbound_payment_for_hash( + payment_hash, + None, + expiry_secs, + Some(min_final_cltv_expiry_delta), + ) + .map_err(|e| { + log_error!(self.logger, "Failed to register inbound payment: {:?}", e); + Error::InvoiceCreationFailed + })?; + (payment_hash, payment_secret) + }, + None => self + .channel_manager + .create_inbound_payment(None, expiry_secs, Some(min_final_cltv_expiry_delta)) + .map_err(|e| { + log_error!(self.logger, "Failed to register inbound payment: {:?}", e); + Error::InvoiceCreationFailed + })?, + }; let route_hint = RouteHint(vec![RouteHintHop { src_node_id: lsps2_client.lsp_node_id, diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 389c818c8..92d7fc948 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -362,8 +362,17 @@ impl Bolt11Payment { } if let Some(details) = self.payment_store.get(&payment_id) { - if let 
Some(expected_amount_msat) = details.amount_msat { - if claimable_amount_msat < expected_amount_msat { + // For payments requested via `receive*_via_jit_channel_for_hash()` + // `skimmed_fee_msat` held by LSP must be taken into account. + let skimmed_fee_msat = match details.kind { + PaymentKind::Bolt11Jit { + counterparty_skimmed_fee_msat: Some(skimmed_fee_msat), + .. + } => skimmed_fee_msat, + _ => 0, + }; + if let Some(invoice_amount_msat) = details.amount_msat { + if claimable_amount_msat < invoice_amount_msat - skimmed_fee_msat { log_error!( self.logger, "Failed to manually claim payment {} as the claimable amount is less than expected", @@ -580,6 +589,46 @@ impl Bolt11Payment { expiry_secs, max_total_lsp_fee_limit_msat, None, + None, + )?; + Ok(maybe_wrap(invoice)) + } + + /// Returns a payable invoice that can be used to request a payment of the amount given and + /// receive it via a newly created just-in-time (JIT) channel. + /// + /// When the returned invoice is paid, the configured [LSPS2]-compliant LSP will open a channel + /// to us, supplying just-in-time inbound liquidity. + /// + /// If set, `max_total_lsp_fee_limit_msat` will limit how much fee we allow the LSP to take for opening the + /// channel to us. We'll use its cheapest offer otherwise. + /// + /// We will register the given payment hash and emit a [`PaymentClaimable`] event once + /// the inbound payment arrives. The check that [`counterparty_skimmed_fee_msat`] is within the limits + /// is performed *before* emitting the event. + /// + /// **Note:** users *MUST* handle this event and claim the payment manually via + /// [`claim_for_hash`] as soon as they have obtained access to the preimage of the given + /// payment hash. If they're unable to obtain the preimage, they *MUST* immediately fail the payment via + /// [`fail_for_hash`]. 
+ /// + /// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md + /// [`PaymentClaimable`]: crate::Event::PaymentClaimable + /// [`claim_for_hash`]: Self::claim_for_hash + /// [`fail_for_hash`]: Self::fail_for_hash + /// [`counterparty_skimmed_fee_msat`]: crate::payment::PaymentKind::Bolt11Jit::counterparty_skimmed_fee_msat + pub fn receive_via_jit_channel_for_hash( + &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, + max_total_lsp_fee_limit_msat: Option, payment_hash: PaymentHash, + ) -> Result { + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_via_jit_channel_inner( + Some(amount_msat), + &description, + expiry_secs, + max_total_lsp_fee_limit_msat, + None, + Some(payment_hash), )?; Ok(maybe_wrap(invoice)) } @@ -606,6 +655,47 @@ impl Bolt11Payment { expiry_secs, None, max_proportional_lsp_fee_limit_ppm_msat, + None, + )?; + Ok(maybe_wrap(invoice)) + } + + /// Returns a payable invoice that can be used to request a variable amount payment (also known + /// as "zero-amount" invoice) and receive it via a newly created just-in-time (JIT) channel. + /// + /// When the returned invoice is paid, the configured [LSPS2]-compliant LSP will open a channel + /// to us, supplying just-in-time inbound liquidity. + /// + /// If set, `max_proportional_lsp_fee_limit_ppm_msat` will limit how much proportional fee, in + /// parts-per-million millisatoshis, we allow the LSP to take for opening the channel to us. + /// We'll use its cheapest offer otherwise. + /// + /// We will register the given payment hash and emit a [`PaymentClaimable`] event once + /// the inbound payment arrives. The check that [`counterparty_skimmed_fee_msat`] is within the limits + /// is performed *before* emitting the event. 
+ /// + /// **Note:** users *MUST* handle this event and claim the payment manually via + /// [`claim_for_hash`] as soon as they have obtained access to the preimage of the given + /// payment hash. If they're unable to obtain the preimage, they *MUST* immediately fail the payment via + /// [`fail_for_hash`]. + /// + /// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md + /// [`PaymentClaimable`]: crate::Event::PaymentClaimable + /// [`claim_for_hash`]: Self::claim_for_hash + /// [`fail_for_hash`]: Self::fail_for_hash + /// [`counterparty_skimmed_fee_msat`]: crate::payment::PaymentKind::Bolt11Jit::counterparty_skimmed_fee_msat + pub fn receive_variable_amount_via_jit_channel_for_hash( + &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, + max_proportional_lsp_fee_limit_ppm_msat: Option, payment_hash: PaymentHash, + ) -> Result { + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_via_jit_channel_inner( + None, + &description, + expiry_secs, + None, + max_proportional_lsp_fee_limit_ppm_msat, + Some(payment_hash), )?; Ok(maybe_wrap(invoice)) } @@ -613,7 +703,7 @@ impl Bolt11Payment { fn receive_via_jit_channel_inner( &self, amount_msat: Option, description: &LdkBolt11InvoiceDescription, expiry_secs: u32, max_total_lsp_fee_limit_msat: Option, - max_proportional_lsp_fee_limit_ppm_msat: Option, + max_proportional_lsp_fee_limit_ppm_msat: Option, payment_hash: Option, ) -> Result { let liquidity_source = self.liquidity_source.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; @@ -645,6 +735,7 @@ impl Bolt11Payment { description, expiry_secs, max_total_lsp_fee_limit_msat, + payment_hash, ) .await .map(|(invoice, total_fee)| (invoice, Some(total_fee), None)) @@ -654,6 +745,7 @@ impl Bolt11Payment { description, expiry_secs, max_proportional_lsp_fee_limit_ppm_msat, + payment_hash, ) .await .map(|(invoice, prop_fee)| (invoice, None, Some(prop_fee))) diff --git 
a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index ad3867429..9fea3094f 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -9,7 +9,8 @@ mod common; use common::{ do_channel_full_cycle, expect_channel_pending_event, expect_channel_ready_event, expect_event, - expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, + expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, + generate_blocks_and_wait, logging::{init_log_logger, validate_log_entry, TestLogWriter}, open_channel, premine_and_distribute_funds, random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, wait_for_tx, @@ -29,7 +30,7 @@ use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::util::persist::KVStore; use lightning_invoice::{Bolt11InvoiceDescription, Description}; -use lightning_types::payment::PaymentPreimage; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; use bitcoin::address::NetworkUnchecked; use bitcoin::hashes::sha256::Hash as Sha256Hash; @@ -1334,6 +1335,7 @@ fn lsps2_client_service_integration() { let payment_id = payer_node.bolt11_payment().send(&jit_invoice, None).unwrap(); expect_channel_pending_event!(service_node, client_node.node_id()); expect_channel_ready_event!(service_node, client_node.node_id()); + expect_event!(service_node, PaymentForwarded); expect_channel_pending_event!(client_node, service_node.node_id()); expect_channel_ready_event!(client_node, service_node.node_id()); @@ -1359,19 +1361,112 @@ fn lsps2_client_service_integration() { println!("Generating regular invoice!"); let invoice_description = - Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()); + Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()).into(); let amount_msat = 5_000_000; - let invoice = client_node - .bolt11_payment() - 
.receive(amount_msat, &invoice_description.into(), 1024) - .unwrap(); + let invoice = + client_node.bolt11_payment().receive(amount_msat, &invoice_description, 1024).unwrap(); // Have the payer_node pay the invoice, to check regular forwards service_node -> client_node // are working as expected. println!("Paying regular invoice!"); let payment_id = payer_node.bolt11_payment().send(&invoice, None).unwrap(); expect_payment_successful_event!(payer_node, Some(payment_id), None); + expect_event!(service_node, PaymentForwarded); expect_payment_received_event!(client_node, amount_msat); + + //////////////////////////////////////////////////////////////////////////// + // receive_via_jit_channel_for_hash and claim_for_hash + //////////////////////////////////////////////////////////////////////////// + println!("Generating JIT invoice!"); + // Increase the amount to make sure it does not fit into the existing channels. + let jit_amount_msat = 200_000_000; + let manual_preimage = PaymentPreimage([42u8; 32]); + let manual_payment_hash: PaymentHash = manual_preimage.into(); + let jit_invoice = client_node + .bolt11_payment() + .receive_via_jit_channel_for_hash( + jit_amount_msat, + &invoice_description, + 1024, + None, + manual_payment_hash, + ) + .unwrap(); + + // Have the payer_node pay the invoice, thereby triggering channel open service_node -> client_node.
+ println!("Paying JIT invoice!"); + let payment_id = payer_node.bolt11_payment().send(&jit_invoice, None).unwrap(); + expect_channel_pending_event!(service_node, client_node.node_id()); + expect_channel_ready_event!(service_node, client_node.node_id()); + expect_channel_pending_event!(client_node, service_node.node_id()); + expect_channel_ready_event!(client_node, service_node.node_id()); + + let service_fee_msat = (jit_amount_msat * channel_opening_fee_ppm as u64) / 1_000_000; + let expected_received_amount_msat = jit_amount_msat - service_fee_msat; + let claimable_amount_msat = expect_payment_claimable_event!( + client_node, + payment_id, + manual_payment_hash, + expected_received_amount_msat + ); + println!("Claiming payment!"); + client_node + .bolt11_payment() + .claim_for_hash(manual_payment_hash, claimable_amount_msat, manual_preimage) + .unwrap(); + + expect_event!(service_node, PaymentForwarded); + expect_payment_successful_event!(payer_node, Some(payment_id), None); + let client_payment_id = + expect_payment_received_event!(client_node, expected_received_amount_msat).unwrap(); + let client_payment = client_node.payment(&client_payment_id).unwrap(); + match client_payment.kind { + PaymentKind::Bolt11Jit { counterparty_skimmed_fee_msat, .. } => { + assert_eq!(counterparty_skimmed_fee_msat, Some(service_fee_msat)); + }, + _ => panic!("Unexpected payment kind"), + } + + //////////////////////////////////////////////////////////////////////////// + // receive_via_jit_channel_for_hash and fail_for_hash + //////////////////////////////////////////////////////////////////////////// + println!("Generating JIT invoice!"); + // Increase the amount to make sure it does not fit into the existing channels. 
+ let jit_amount_msat = 400_000_000; + let manual_preimage = PaymentPreimage([43u8; 32]); + let manual_payment_hash: PaymentHash = manual_preimage.into(); + let jit_invoice = client_node + .bolt11_payment() + .receive_via_jit_channel_for_hash( + jit_amount_msat, + &invoice_description, + 1024, + None, + manual_payment_hash, + ) + .unwrap(); + + // Have the payer_node pay the invoice, thereby triggering channel open service_node -> client_node. + println!("Paying JIT invoice!"); + let payment_id = payer_node.bolt11_payment().send(&jit_invoice, None).unwrap(); + expect_channel_pending_event!(service_node, client_node.node_id()); + expect_channel_ready_event!(service_node, client_node.node_id()); + expect_channel_pending_event!(client_node, service_node.node_id()); + expect_channel_ready_event!(client_node, service_node.node_id()); + + let service_fee_msat = (jit_amount_msat * channel_opening_fee_ppm as u64) / 1_000_000; + let expected_received_amount_msat = jit_amount_msat - service_fee_msat; + expect_payment_claimable_event!( + client_node, + payment_id, + manual_payment_hash, + expected_received_amount_msat + ); + println!("Failing payment!"); + client_node.bolt11_payment().fail_for_hash(manual_payment_hash).unwrap(); + + expect_event!(payer_node, PaymentFailed); + assert_eq!(client_node.payment(&payment_id).unwrap().status, PaymentStatus::Failed); } #[test] From 3fe4f2f6b086f88737b4a5534faf1879183592bc Mon Sep 17 00:00:00 2001 From: moisesPomilio <93723302+moisesPompilio@users.noreply.github.com> Date: Thu, 14 Aug 2025 16:37:38 -0300 Subject: [PATCH 058/184] Fix RPC Txid handling and mempool eviction - Replace `bitcoin::consensus::encode::deserialize_hex()` with `hex_str.parse::<Txid>()` when parsing Txids from RPC, and `serialize_hex()` with `txid.to_string()` when sending to RPC, ensuring proper handling of Bitcoin Core's reversed-byte hexadecimal format.
- Fix mempool eviction logic: transactions that are no longer in the mempool are now correctly removed from wallet consideration, preventing stale pending transactions from inflating unconfirmed balances. - Refactor `unconfirmed_txids` to `bdk_unconfirmed_txids` to make it easier to identify what are unconfirmed bdk transactions --- src/chain/bitcoind.rs | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index a120f8253..6ea9f271e 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -370,8 +370,11 @@ impl BitcoindChainSource { let cur_height = channel_manager.current_best_block().height; let now = SystemTime::now(); - let unconfirmed_txids = self.onchain_wallet.get_unconfirmed_txids(); - match self.api_client.get_updated_mempool_transactions(cur_height, unconfirmed_txids).await + let bdk_unconfirmed_txids = self.onchain_wallet.get_unconfirmed_txids(); + match self + .api_client + .get_updated_mempool_transactions(cur_height, bdk_unconfirmed_txids) + .await { Ok((unconfirmed_txs, evicted_txids)) => { log_trace!( @@ -754,7 +757,7 @@ impl BitcoindClient { async fn get_raw_transaction_rpc( rpc_client: Arc, txid: &Txid, ) -> std::io::Result> { - let txid_hex = bitcoin::consensus::encode::serialize_hex(txid); + let txid_hex = txid.to_string(); let txid_json = serde_json::json!(txid_hex); match rpc_client .call_method::("getrawtransaction", &[txid_json]) @@ -792,7 +795,7 @@ impl BitcoindClient { async fn get_raw_transaction_rest( rest_client: Arc, txid: &Txid, ) -> std::io::Result> { - let txid_hex = bitcoin::consensus::encode::serialize_hex(txid); + let txid_hex = txid.to_string(); let tx_path = format!("tx/{}.json", txid_hex); match rest_client .request_resource::(&tx_path) @@ -889,7 +892,7 @@ impl BitcoindClient { async fn get_mempool_entry_inner( client: Arc, txid: Txid, ) -> std::io::Result> { - let txid_hex = bitcoin::consensus::encode::serialize_hex(&txid); 
+ let txid_hex = txid.to_string(); let txid_json = serde_json::json!(txid_hex); match client.call_method::("getmempoolentry", &[txid_json]).await { @@ -964,11 +967,12 @@ impl BitcoindClient { /// - mempool transactions, alongside their first-seen unix timestamps. /// - transactions that have been evicted from the mempool, alongside the last time they were seen absent. pub(crate) async fn get_updated_mempool_transactions( - &self, best_processed_height: u32, unconfirmed_txids: Vec, + &self, best_processed_height: u32, bdk_unconfirmed_txids: Vec, ) -> std::io::Result<(Vec<(Transaction, u64)>, Vec<(Txid, u64)>)> { let mempool_txs = self.get_mempool_transactions_and_timestamp_at_height(best_processed_height).await?; - let evicted_txids = self.get_evicted_mempool_txids_and_timestamp(unconfirmed_txids).await?; + let evicted_txids = + self.get_evicted_mempool_txids_and_timestamp(bdk_unconfirmed_txids).await?; Ok((mempool_txs, evicted_txids)) } @@ -1078,14 +1082,14 @@ impl BitcoindClient { // To this end, we first update our local mempool_entries_cache and then return all unconfirmed // wallet `Txid`s that don't appear in the mempool still. async fn get_evicted_mempool_txids_and_timestamp( - &self, unconfirmed_txids: Vec, + &self, bdk_unconfirmed_txids: Vec, ) -> std::io::Result> { match self { BitcoindClient::Rpc { latest_mempool_timestamp, mempool_entries_cache, .. 
} => { Self::get_evicted_mempool_txids_and_timestamp_inner( latest_mempool_timestamp, mempool_entries_cache, - unconfirmed_txids, + bdk_unconfirmed_txids, ) .await }, @@ -1093,7 +1097,7 @@ impl BitcoindClient { Self::get_evicted_mempool_txids_and_timestamp_inner( latest_mempool_timestamp, mempool_entries_cache, - unconfirmed_txids, + bdk_unconfirmed_txids, ) .await }, @@ -1103,13 +1107,13 @@ impl BitcoindClient { async fn get_evicted_mempool_txids_and_timestamp_inner( latest_mempool_timestamp: &AtomicU64, mempool_entries_cache: &tokio::sync::Mutex>, - unconfirmed_txids: Vec, + bdk_unconfirmed_txids: Vec, ) -> std::io::Result> { let latest_mempool_timestamp = latest_mempool_timestamp.load(Ordering::Relaxed); let mempool_entries_cache = mempool_entries_cache.lock().await; - let evicted_txids = unconfirmed_txids + let evicted_txids = bdk_unconfirmed_txids .into_iter() - .filter(|txid| mempool_entries_cache.contains_key(txid)) + .filter(|txid| !mempool_entries_cache.contains_key(txid)) .map(|txid| (txid, latest_mempool_timestamp)) .collect(); Ok(evicted_txids) @@ -1236,7 +1240,7 @@ impl TryInto for JsonResponse { for hex in res { let txid = if let Some(hex_str) = hex.as_str() { - match bitcoin::consensus::encode::deserialize_hex(hex_str) { + match hex_str.parse::() { Ok(txid) => txid, Err(_) => { return Err(std::io::Error::new( From 5fe06c842cccae824e094640053b9a3a86fcd38c Mon Sep 17 00:00:00 2001 From: moisesPomilio <93723302+moisesPompilio@users.noreply.github.com> Date: Thu, 14 Aug 2025 16:40:47 -0300 Subject: [PATCH 059/184] Add property-based tests for RPC JSON responses - Validate roundtrip serialization/deserialization for Txids, transactions, mempool entries, and fee responses. - Ensure Txid parsing/serialization matches Bitcoin Core RPC expectations. 
--- src/chain/bitcoind.rs | 161 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 161 insertions(+) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 6ea9f271e..c282a6141 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -1411,3 +1411,164 @@ impl std::fmt::Display for HttpError { write!(f, "status_code: {}, contents: {}", self.status_code, contents) } } + +#[cfg(test)] +mod tests { + use bitcoin::hashes::Hash; + use bitcoin::{FeeRate, OutPoint, ScriptBuf, Transaction, TxIn, TxOut, Txid, Witness}; + use lightning_block_sync::http::JsonResponse; + use proptest::{arbitrary::any, collection::vec, prop_assert_eq, prop_compose, proptest}; + use serde_json::json; + + use crate::chain::bitcoind::{ + FeeResponse, GetMempoolEntryResponse, GetRawMempoolResponse, GetRawTransactionResponse, + MempoolMinFeeResponse, + }; + + prop_compose! { + fn arbitrary_witness()( + witness_elements in vec(vec(any::(), 0..100), 0..20) + ) -> Witness { + let mut witness = Witness::new(); + for element in witness_elements { + witness.push(element); + } + witness + } + } + + prop_compose! { + fn arbitrary_txin()( + outpoint_hash in any::<[u8; 32]>(), + outpoint_vout in any::(), + script_bytes in vec(any::(), 0..100), + witness in arbitrary_witness(), + sequence in any::() + ) -> TxIn { + TxIn { + previous_output: OutPoint { + txid: Txid::from_byte_array(outpoint_hash), + vout: outpoint_vout, + }, + script_sig: ScriptBuf::from_bytes(script_bytes), + sequence: bitcoin::Sequence::from_consensus(sequence), + witness, + } + } + } + + prop_compose! { + fn arbitrary_txout()( + value in 0u64..21_000_000_00_000_000u64, + script_bytes in vec(any::(), 0..100) + ) -> TxOut { + TxOut { + value: bitcoin::Amount::from_sat(value), + script_pubkey: ScriptBuf::from_bytes(script_bytes), + } + } + } + + prop_compose! 
{ + fn arbitrary_transaction()( + version in any::(), + inputs in vec(arbitrary_txin(), 1..20), + outputs in vec(arbitrary_txout(), 1..20), + lock_time in any::() + ) -> Transaction { + Transaction { + version: bitcoin::transaction::Version(version), + input: inputs, + output: outputs, + lock_time: bitcoin::absolute::LockTime::from_consensus(lock_time), + } + } + } + + proptest! { + #![proptest_config(proptest::test_runner::Config::with_cases(20))] + + #[test] + fn prop_get_raw_mempool_response_roundtrip(txids in vec(any::<[u8;32]>(), 0..10)) { + let txid_vec: Vec = txids.into_iter().map(Txid::from_byte_array).collect(); + let original = GetRawMempoolResponse(txid_vec.clone()); + + let json_vec: Vec = txid_vec.iter().map(|t| t.to_string()).collect(); + let json_val = serde_json::Value::Array(json_vec.iter().map(|s| json!(s)).collect()); + + let resp = JsonResponse(json_val); + let decoded: GetRawMempoolResponse = resp.try_into().unwrap(); + + prop_assert_eq!(original.0.len(), decoded.0.len()); + + prop_assert_eq!(original.0, decoded.0); + } + + #[test] + fn prop_get_mempool_entry_response_roundtrip( + time in any::(), + height in any::() + ) { + let json_val = json!({ + "time": time, + "height": height + }); + + let resp = JsonResponse(json_val); + let decoded: GetMempoolEntryResponse = resp.try_into().unwrap(); + + prop_assert_eq!(decoded.time, time); + prop_assert_eq!(decoded.height, height); + } + + #[test] + fn prop_get_raw_transaction_response_roundtrip(tx in arbitrary_transaction()) { + let hex = bitcoin::consensus::encode::serialize_hex(&tx); + let json_val = serde_json::Value::String(hex.clone()); + + let resp = JsonResponse(json_val); + let decoded: GetRawTransactionResponse = resp.try_into().unwrap(); + + prop_assert_eq!(decoded.0.compute_txid(), tx.compute_txid()); + prop_assert_eq!(decoded.0.compute_wtxid(), tx.compute_wtxid()); + + prop_assert_eq!(decoded.0, tx); + } + + #[test] + fn prop_fee_response_roundtrip(fee_rate in any::()) { + let fee_rate = 
fee_rate.abs(); + let json_val = json!({ + "feerate": fee_rate, + "errors": serde_json::Value::Null + }); + + let resp = JsonResponse(json_val); + let decoded: FeeResponse = resp.try_into().unwrap(); + + let expected = { + let fee_rate_sat_per_kwu = (fee_rate * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + prop_assert_eq!(decoded.0, expected); + } + + #[test] + fn prop_mempool_min_fee_response_roundtrip(fee_rate in any::()) { + let fee_rate = fee_rate.abs(); + let json_val = json!({ + "mempoolminfee": fee_rate + }); + + let resp = JsonResponse(json_val); + let decoded: MempoolMinFeeResponse = resp.try_into().unwrap(); + + let expected = { + let fee_rate_sat_per_kwu = (fee_rate * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + prop_assert_eq!(decoded.0, expected); + } + + } +} From 4922e8da5b0622bb89e35c86fd6b143a1f0cecd2 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 18 Aug 2025 15:41:39 +0200 Subject: [PATCH 060/184] Refactor move background task fields to `Runtime` Previously we a) added a new internal `Runtime` API that cleans up our internal logic and b) added tracking for spawned background tasks to be able to await/abort them on shutdown. Here we move the tracking into the `Runtime` object, which will allow us to easily extend the tracking to *any* spawned tasks in the next step. 
--- src/builder.rs | 15 ++-- src/lib.rs | 186 ++++++++++--------------------------------------- src/runtime.rs | 158 +++++++++++++++++++++++++++++++++++++++-- 3 files changed, 192 insertions(+), 167 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 729cefe1b..7f15cced6 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -668,9 +668,9 @@ impl NodeBuilder { let logger = setup_logger(&self.log_writer_config, &self.config)?; let runtime = if let Some(handle) = self.runtime_handle.as_ref() { - Arc::new(Runtime::with_handle(handle.clone())) + Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger))) } else { - Arc::new(Runtime::new().map_err(|e| { + Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| { log_error!(logger, "Failed to setup tokio runtime: {}", e); BuildError::RuntimeSetupFailed })?) @@ -715,9 +715,9 @@ impl NodeBuilder { let logger = setup_logger(&self.log_writer_config, &self.config)?; let runtime = if let Some(handle) = self.runtime_handle.as_ref() { - Arc::new(Runtime::with_handle(handle.clone())) + Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger))) } else { - Arc::new(Runtime::new().map_err(|e| { + Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| { log_error!(logger, "Failed to setup tokio runtime: {}", e); BuildError::RuntimeSetupFailed })?) 
@@ -1668,18 +1668,11 @@ fn build_with_store_internal( }; let (stop_sender, _) = tokio::sync::watch::channel(()); - let background_processor_task = Mutex::new(None); - let background_tasks = Mutex::new(None); - let cancellable_background_tasks = Mutex::new(None); - let is_running = Arc::new(RwLock::new(false)); Ok(Node { runtime, stop_sender, - background_processor_task, - background_tasks, - cancellable_background_tasks, config, wallet, chain_source, diff --git a/src/lib.rs b/src/lib.rs index cc5e383a1..1604d1b46 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -128,9 +128,8 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, ChannelConfig, Config, - BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, - NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, + default_user_config, may_announce_channel, ChannelConfig, Config, NODE_ANN_BCAST_INTERVAL, + PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; @@ -181,9 +180,6 @@ uniffi::include_scaffolding!("ldk_node"); pub struct Node { runtime: Arc, stop_sender: tokio::sync::watch::Sender<()>, - background_processor_task: Mutex>>, - background_tasks: Mutex>>, - cancellable_background_tasks: Mutex>>, config: Arc, wallet: Arc, chain_source: Arc, @@ -226,10 +222,6 @@ impl Node { return Err(Error::AlreadyRunning); } - let mut background_tasks = tokio::task::JoinSet::new(); - let mut cancellable_background_tasks = tokio::task::JoinSet::new(); - let runtime_handle = self.runtime.handle(); - log_info!( self.logger, "Starting up LDK Node with node ID {} on network: {}", @@ -253,19 +245,11 @@ impl Node { let sync_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - background_tasks.spawn_on( - async move { - chain_source - .continuously_sync_wallets( - 
stop_sync_receiver, - sync_cman, - sync_cmon, - sync_sweeper, - ) - .await; - }, - runtime_handle, - ); + self.runtime.spawn_background_task(async move { + chain_source + .continuously_sync_wallets(stop_sync_receiver, sync_cman, sync_cmon, sync_sweeper) + .await; + }); if self.gossip_source.is_rgs() { let gossip_source = Arc::clone(&self.gossip_source); @@ -273,7 +257,7 @@ impl Node { let gossip_sync_logger = Arc::clone(&self.logger); let gossip_node_metrics = Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); - cancellable_background_tasks.spawn_on(async move { + self.runtime.spawn_cancellable_background_task(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); loop { tokio::select! { @@ -314,7 +298,7 @@ impl Node { } } } - }, runtime_handle); + }); } if let Some(listening_addresses) = &self.config.listening_addresses { @@ -340,7 +324,7 @@ impl Node { bind_addrs.extend(resolved_address); } - cancellable_background_tasks.spawn_on(async move { + self.runtime.spawn_cancellable_background_task(async move { { let listener = tokio::net::TcpListener::bind(&*bind_addrs).await @@ -378,7 +362,7 @@ impl Node { } listening_indicator.store(false, Ordering::Release); - }, runtime_handle); + }); } // Regularly reconnect to persisted peers. @@ -387,7 +371,7 @@ impl Node { let connect_logger = Arc::clone(&self.logger); let connect_peer_store = Arc::clone(&self.peer_store); let mut stop_connect = self.stop_sender.subscribe(); - cancellable_background_tasks.spawn_on(async move { + self.runtime.spawn_cancellable_background_task(async move { let mut interval = tokio::time::interval(PEER_RECONNECTION_INTERVAL); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); loop { @@ -415,7 +399,7 @@ impl Node { } } } - }, runtime_handle); + }); // Regularly broadcast node announcements. 
let bcast_cm = Arc::clone(&self.channel_manager); @@ -427,7 +411,7 @@ impl Node { let mut stop_bcast = self.stop_sender.subscribe(); let node_alias = self.config.node_alias.clone(); if may_announce_channel(&self.config).is_ok() { - cancellable_background_tasks.spawn_on(async move { + self.runtime.spawn_cancellable_background_task(async move { // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. #[cfg(not(test))] let mut interval = tokio::time::interval(Duration::from_secs(30)); @@ -498,15 +482,14 @@ impl Node { } } } - }, runtime_handle); + }); } let stop_tx_bcast = self.stop_sender.subscribe(); let chain_source = Arc::clone(&self.chain_source); - cancellable_background_tasks.spawn_on( - async move { chain_source.continuously_process_broadcast_queue(stop_tx_bcast).await }, - runtime_handle, - ); + self.runtime.spawn_cancellable_background_task(async move { + chain_source.continuously_process_broadcast_queue(stop_tx_bcast).await + }); let bump_tx_event_handler = Arc::new(BumpTransactionEventHandler::new( Arc::clone(&self.tx_broadcaster), @@ -563,7 +546,7 @@ impl Node { }) }; - let handle = self.runtime.spawn(async move { + self.runtime.spawn_background_processor_task(async move { process_events_async( background_persister, |e| background_event_handler.handle_event(e), @@ -584,38 +567,27 @@ impl Node { panic!("Failed to process events"); }); }); - debug_assert!(self.background_processor_task.lock().unwrap().is_none()); - *self.background_processor_task.lock().unwrap() = Some(handle); if let Some(liquidity_source) = self.liquidity_source.as_ref() { let mut stop_liquidity_handler = self.stop_sender.subscribe(); let liquidity_handler = Arc::clone(&liquidity_source); let liquidity_logger = Arc::clone(&self.logger); - background_tasks.spawn_on( - async move { - loop { - tokio::select! 
{ - _ = stop_liquidity_handler.changed() => { - log_debug!( - liquidity_logger, - "Stopping processing liquidity events.", - ); - return; - } - _ = liquidity_handler.handle_next_event() => {} + self.runtime.spawn_background_task(async move { + loop { + tokio::select! { + _ = stop_liquidity_handler.changed() => { + log_debug!( + liquidity_logger, + "Stopping processing liquidity events.", + ); + return; } + _ = liquidity_handler.handle_next_event() => {} } - }, - runtime_handle, - ); + } + }); } - debug_assert!(self.background_tasks.lock().unwrap().is_none()); - *self.background_tasks.lock().unwrap() = Some(background_tasks); - - debug_assert!(self.cancellable_background_tasks.lock().unwrap().is_none()); - *self.cancellable_background_tasks.lock().unwrap() = Some(cancellable_background_tasks); - log_info!(self.logger, "Startup complete."); *is_running_lock = true; Ok(()) @@ -649,15 +621,7 @@ impl Node { } // Cancel cancellable background tasks - if let Some(mut tasks) = self.cancellable_background_tasks.lock().unwrap().take() { - let runtime_handle = self.runtime.handle(); - tasks.abort_all(); - tokio::task::block_in_place(move || { - runtime_handle.block_on(async { while let Some(_) = tasks.join_next().await {} }) - }); - } else { - debug_assert!(false, "Expected some cancellable background tasks"); - }; + self.runtime.abort_cancellable_background_tasks(); // Disconnect all peers. self.peer_manager.disconnect_all_peers(); @@ -668,91 +632,13 @@ impl Node { log_debug!(self.logger, "Stopped chain sources."); // Wait until non-cancellable background tasks (mod LDK's background processor) are done. 
- let runtime_handle = self.runtime.handle(); - if let Some(mut tasks) = self.background_tasks.lock().unwrap().take() { - tokio::task::block_in_place(move || { - runtime_handle.block_on(async { - loop { - let timeout_fut = tokio::time::timeout( - Duration::from_secs(BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS), - tasks.join_next_with_id(), - ); - match timeout_fut.await { - Ok(Some(Ok((id, _)))) => { - log_trace!(self.logger, "Stopped background task with id {}", id); - }, - Ok(Some(Err(e))) => { - tasks.abort_all(); - log_trace!(self.logger, "Stopping background task failed: {}", e); - break; - }, - Ok(None) => { - log_debug!(self.logger, "Stopped all background tasks"); - break; - }, - Err(e) => { - tasks.abort_all(); - log_error!( - self.logger, - "Stopping background task timed out: {}", - e - ); - break; - }, - } - } - }) - }); - } else { - debug_assert!(false, "Expected some background tasks"); - }; + self.runtime.wait_on_background_tasks(); - // Wait until background processing stopped, at least until a timeout is reached. - if let Some(background_processor_task) = - self.background_processor_task.lock().unwrap().take() - { - let abort_handle = background_processor_task.abort_handle(); - let timeout_res = tokio::task::block_in_place(move || { - self.runtime.block_on(async { - tokio::time::timeout( - Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS), - background_processor_task, - ) - .await - }) - }); - - match timeout_res { - Ok(stop_res) => match stop_res { - Ok(()) => log_debug!(self.logger, "Stopped background processing of events."), - Err(e) => { - abort_handle.abort(); - log_error!( - self.logger, - "Stopping event handling failed. This should never happen: {}", - e - ); - panic!("Stopping event handling failed. 
This should never happen."); - }, - }, - Err(e) => { - abort_handle.abort(); - log_error!(self.logger, "Stopping event handling timed out: {}", e); - }, - } - } else { - debug_assert!(false, "Expected a background processing task"); - }; + // Finally, wait until background processing stopped, at least until a timeout is reached. + self.runtime.wait_on_background_processor_task(); #[cfg(tokio_unstable)] - { - let runtime_handle = self.runtime.handle(); - log_trace!( - self.logger, - "Active runtime tasks left prior to shutdown: {}", - runtime_handle.metrics().active_tasks_count() - ); - } + self.runtime.log_metrics(); log_info!(self.logger, "Shutdown complete."); *is_running_lock = false; diff --git a/src/runtime.rs b/src/runtime.rs index 4c1241165..0bd3941c7 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -5,16 +5,27 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use tokio::task::JoinHandle; +use crate::config::{ + BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, +}; +use crate::logger::{log_debug, log_error, log_trace, LdkLogger, Logger}; + +use tokio::task::{JoinHandle, JoinSet}; use std::future::Future; +use std::sync::{Arc, Mutex}; +use std::time::Duration; pub(crate) struct Runtime { mode: RuntimeMode, + background_tasks: Mutex>, + cancellable_background_tasks: Mutex>, + background_processor_task: Mutex>>, + logger: Arc, } impl Runtime { - pub fn new() -> Result { + pub fn new(logger: Arc) -> Result { let mode = match tokio::runtime::Handle::try_current() { Ok(handle) => RuntimeMode::Handle(handle), Err(_) => { @@ -22,12 +33,32 @@ impl Runtime { RuntimeMode::Owned(rt) }, }; - Ok(Self { mode }) + let background_tasks = Mutex::new(JoinSet::new()); + let cancellable_background_tasks = Mutex::new(JoinSet::new()); + let background_processor_task = Mutex::new(None); + + Ok(Self { + mode, + background_tasks, + 
cancellable_background_tasks, + background_processor_task, + logger, + }) } - pub fn with_handle(handle: tokio::runtime::Handle) -> Self { + pub fn with_handle(handle: tokio::runtime::Handle, logger: Arc) -> Self { let mode = RuntimeMode::Handle(handle); - Self { mode } + let background_tasks = Mutex::new(JoinSet::new()); + let cancellable_background_tasks = Mutex::new(JoinSet::new()); + let background_processor_task = Mutex::new(None); + + Self { + mode, + background_tasks, + cancellable_background_tasks, + background_processor_task, + logger, + } } pub fn spawn(&self, future: F) -> JoinHandle @@ -39,6 +70,36 @@ impl Runtime { handle.spawn(future) } + pub fn spawn_background_task(&self, future: F) + where + F: Future + Send + 'static, + { + let mut background_tasks = self.background_tasks.lock().unwrap(); + let runtime_handle = self.handle(); + background_tasks.spawn_on(future, runtime_handle); + } + + pub fn spawn_cancellable_background_task(&self, future: F) + where + F: Future + Send + 'static, + { + let mut cancellable_background_tasks = self.cancellable_background_tasks.lock().unwrap(); + let runtime_handle = self.handle(); + cancellable_background_tasks.spawn_on(future, runtime_handle); + } + + pub fn spawn_background_processor_task(&self, future: F) + where + F: Future + Send + 'static, + { + let mut background_processor_task = self.background_processor_task.lock().unwrap(); + debug_assert!(background_processor_task.is_none(), "Expected no background processor_task"); + + let runtime_handle = self.handle(); + let handle = runtime_handle.spawn(future); + *background_processor_task = Some(handle); + } + pub fn spawn_blocking(&self, func: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, @@ -58,7 +119,92 @@ impl Runtime { tokio::task::block_in_place(move || handle.block_on(future)) } - pub fn handle(&self) -> &tokio::runtime::Handle { + pub fn abort_cancellable_background_tasks(&self) { + let mut tasks = core::mem::take(&mut 
*self.cancellable_background_tasks.lock().unwrap()); + debug_assert!(tasks.len() > 0, "Expected some cancellable background_tasks"); + tasks.abort_all(); + self.block_on(async { while let Some(_) = tasks.join_next().await {} }) + } + + pub fn wait_on_background_tasks(&self) { + let mut tasks = core::mem::take(&mut *self.background_tasks.lock().unwrap()); + debug_assert!(tasks.len() > 0, "Expected some background_tasks"); + self.block_on(async { + loop { + let timeout_fut = tokio::time::timeout( + Duration::from_secs(BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS), + tasks.join_next_with_id(), + ); + match timeout_fut.await { + Ok(Some(Ok((id, _)))) => { + log_trace!(self.logger, "Stopped background task with id {}", id); + }, + Ok(Some(Err(e))) => { + tasks.abort_all(); + log_trace!(self.logger, "Stopping background task failed: {}", e); + break; + }, + Ok(None) => { + log_debug!(self.logger, "Stopped all background tasks"); + break; + }, + Err(e) => { + tasks.abort_all(); + log_error!(self.logger, "Stopping background task timed out: {}", e); + break; + }, + } + } + }) + } + + pub fn wait_on_background_processor_task(&self) { + if let Some(background_processor_task) = + self.background_processor_task.lock().unwrap().take() + { + let abort_handle = background_processor_task.abort_handle(); + let timeout_res = self.block_on(async { + tokio::time::timeout( + Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS), + background_processor_task, + ) + .await + }); + + match timeout_res { + Ok(stop_res) => match stop_res { + Ok(()) => log_debug!(self.logger, "Stopped background processing of events."), + Err(e) => { + abort_handle.abort(); + log_error!( + self.logger, + "Stopping event handling failed. This should never happen: {}", + e + ); + panic!("Stopping event handling failed. 
This should never happen."); + }, + }, + Err(e) => { + abort_handle.abort(); + log_error!(self.logger, "Stopping event handling timed out: {}", e); + }, + } + } else { + debug_assert!(false, "Expected a background processing task"); + }; + } + + #[cfg(tokio_unstable)] + pub fn log_metrics(&self) { + let runtime_handle = self.handle(); + log_trace!( + self.logger, + "Active runtime tasks left prior to shutdown: {}", + runtime_handle.metrics().active_tasks_count() + ); + } + + fn handle(&self) -> &tokio::runtime::Handle { match &self.mode { RuntimeMode::Owned(rt) => rt.handle(), RuntimeMode::Handle(handle) => handle, From 90a4fe1b7d33e7a61fb074e5e45e0e2d087f1456 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 19 Aug 2025 13:30:19 +0200 Subject: [PATCH 061/184] Drop `Runtime::spawn` in favor of `spawn_cancellable_background_task` We now drop the generic `spawn` from our internal `Runtime` API, ensuring we'd always have to either use `spawn_cancellable_background_task` or `spawn_background_task`. 
--- src/event.rs | 4 ++-- src/gossip.rs | 2 +- src/runtime.rs | 9 --------- 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/event.rs b/src/event.rs index 883177d67..ff94d51d1 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1059,7 +1059,7 @@ where forwarding_channel_manager.process_pending_htlc_forwards(); }; - self.runtime.spawn(future); + self.runtime.spawn_cancellable_background_task(future); }, LdkEvent::SpendableOutputs { outputs, channel_id } => { match self.output_sweeper.track_spendable_outputs(outputs, channel_id, true, None) { @@ -1441,7 +1441,7 @@ where } } }; - self.runtime.spawn(future); + self.runtime.spawn_cancellable_background_task(future); }, LdkEvent::BumpTransaction(bte) => { match bte { diff --git a/src/gossip.rs b/src/gossip.rs index 1185f0718..258f9f736 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -144,6 +144,6 @@ impl RuntimeSpawner { impl FutureSpawner for RuntimeSpawner { fn spawn + Send + 'static>(&self, future: T) { - self.runtime.spawn(future); + self.runtime.spawn_cancellable_background_task(future); } } diff --git a/src/runtime.rs b/src/runtime.rs index 0bd3941c7..b30790a04 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -61,15 +61,6 @@ impl Runtime { } } - pub fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - let handle = self.handle(); - handle.spawn(future) - } - pub fn spawn_background_task(&self, future: F) where F: Future + Send + 'static, From f3aab90cf93c5fdd15e5a05715b91b56ffe66bfd Mon Sep 17 00:00:00 2001 From: moisesPomilio <93723302+moisesPompilio@users.noreply.github.com> Date: Tue, 19 Aug 2025 10:32:51 -0300 Subject: [PATCH 062/184] Add RBF integration tests with multi-node setup - Test mempool-only RBF handling and balance adjustments. - Test RBF transactions confirmed in block, ensuring stale unconfirmed txs are removed. - Introduce `distribute_funds_unconfirmed` for creating unconfirmed outputs. 
--- tests/common/mod.rs | 90 +++++++++++++++++-- tests/integration_tests_rust.rs | 148 ++++++++++++++++++++++++++++++-- 2 files changed, 226 insertions(+), 12 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index ab66f0fdd..780e9bbf4 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -30,8 +30,10 @@ use lightning_types::payment::{PaymentHash, PaymentPreimage}; use lightning_persister::fs_store::FilesystemStore; use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; -use bitcoin::{Address, Amount, Network, OutPoint, Txid}; +use bitcoin::hashes::{hex::FromHex, Hash}; +use bitcoin::{ + Address, Amount, Network, OutPoint, ScriptBuf, Sequence, Transaction, Txid, Witness, +}; use electrsd::corepc_node::Client as BitcoindClient; use electrsd::corepc_node::Node as BitcoinD; @@ -42,7 +44,7 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde_json::{json, Value}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::env; use std::path::PathBuf; use std::sync::{Arc, RwLock}; @@ -487,12 +489,25 @@ where pub(crate) fn premine_and_distribute_funds( bitcoind: &BitcoindClient, electrs: &E, addrs: Vec
, amount: Amount, ) { + premine_blocks(bitcoind, electrs); + + distribute_funds_unconfirmed(bitcoind, electrs, addrs, amount); + generate_blocks_and_wait(bitcoind, electrs, 1); +} + +pub(crate) fn premine_blocks(bitcoind: &BitcoindClient, electrs: &E) { let _ = bitcoind.create_wallet("ldk_node_test"); let _ = bitcoind.load_wallet("ldk_node_test"); generate_blocks_and_wait(bitcoind, electrs, 101); +} - let amounts: HashMap = - addrs.iter().map(|addr| (addr.to_string(), amount.to_btc())).collect(); +pub(crate) fn distribute_funds_unconfirmed( + bitcoind: &BitcoindClient, electrs: &E, addrs: Vec
, amount: Amount, +) -> Txid { + let mut amounts = HashMap::::new(); + for addr in &addrs { + amounts.insert(addr.to_string(), amount.to_btc()); + } let empty_account = json!(""); let amounts_json = json!(amounts); @@ -505,7 +520,70 @@ pub(crate) fn premine_and_distribute_funds( .unwrap(); wait_for_tx(electrs, txid); - generate_blocks_and_wait(bitcoind, electrs, 1); + + txid +} + +pub(crate) fn prepare_rbf( + electrs: &E, txid: Txid, scripts_buf: &HashSet, +) -> (Transaction, usize) { + let tx = electrs.transaction_get(&txid).unwrap(); + + let fee_output_index = tx + .output + .iter() + .position(|output| !scripts_buf.contains(&output.script_pubkey)) + .expect("No output available for fee bumping"); + + (tx, fee_output_index) +} + +pub(crate) fn bump_fee_and_broadcast( + bitcoind: &BitcoindClient, electrs: &E, mut tx: Transaction, fee_output_index: usize, + is_insert_block: bool, +) -> Transaction { + let mut bump_fee_amount_sat = tx.vsize() as u64; + let attempts = 5; + + for _ in 0..attempts { + let fee_output = &mut tx.output[fee_output_index]; + let new_fee_value = fee_output.value.to_sat().saturating_sub(bump_fee_amount_sat); + if new_fee_value < 546 { + panic!("Warning: Fee output approaching dust limit ({} sats)", new_fee_value); + } + fee_output.value = Amount::from_sat(new_fee_value); + + for input in &mut tx.input { + input.sequence = Sequence::ENABLE_RBF_NO_LOCKTIME; + input.script_sig = ScriptBuf::new(); + input.witness = Witness::new(); + } + + let signed_result = bitcoind.sign_raw_transaction_with_wallet(&tx).unwrap(); + assert!(signed_result.complete, "Failed to sign RBF transaction"); + + let tx_bytes = Vec::::from_hex(&signed_result.hex).unwrap(); + tx = bitcoin::consensus::encode::deserialize::(&tx_bytes).unwrap(); + + match bitcoind.send_raw_transaction(&tx) { + Ok(res) => { + if is_insert_block { + generate_blocks_and_wait(bitcoind, electrs, 1); + } + let new_txid: Txid = res.0.parse().unwrap(); + wait_for_tx(electrs, new_txid); + return tx; + 
}, + Err(_) => { + bump_fee_amount_sat += bump_fee_amount_sat * 5; + if tx.output[fee_output_index].value.to_sat() < bump_fee_amount_sat { + panic!("Insufficient funds to increase fee"); + } + }, + } + } + + panic!("Failed to bump fee after {} attempts", attempts); } pub fn open_channel( diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 9fea3094f..0932116ef 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -8,13 +8,14 @@ mod common; use common::{ - do_channel_full_cycle, expect_channel_pending_event, expect_channel_ready_event, expect_event, + bump_fee_and_broadcast, distribute_funds_unconfirmed, do_channel_full_cycle, + expect_channel_pending_event, expect_channel_ready_event, expect_event, expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, logging::{init_log_logger, validate_log_entry, TestLogWriter}, - open_channel, premine_and_distribute_funds, random_config, random_listening_addresses, - setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, wait_for_tx, - TestChainSource, TestSyncStore, + open_channel, premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, + random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, + setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, }; use ldk_node::config::EsploraSyncConfig; @@ -35,10 +36,10 @@ use lightning_types::payment::{PaymentHash, PaymentPreimage}; use bitcoin::address::NetworkUnchecked; use bitcoin::hashes::sha256::Hash as Sha256Hash; use bitcoin::hashes::Hash; -use bitcoin::Address; -use bitcoin::Amount; +use bitcoin::{Address, Amount, ScriptBuf}; use log::LevelFilter; +use std::collections::HashSet; use std::str::FromStr; use std::sync::Arc; @@ -670,6 +671,141 @@ fn onchain_wallet_recovery() { ); } +#[test] +fn test_rbf_via_mempool() { + run_rbf_test(false); +} + +#[test] +fn 
test_rbf_via_direct_block_insertion() { + run_rbf_test(true); +} + +// `is_insert_block`: +// - `true`: transaction is mined immediately (no mempool), testing confirmed-Tx handling. +// - `false`: transaction stays in mempool until confirmation, testing unconfirmed-Tx handling. +fn run_rbf_test(is_insert_block: bool) { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind); + let chain_source_electrsd = TestChainSource::Electrum(&electrsd); + let chain_source_esplora = TestChainSource::Esplora(&electrsd); + + macro_rules! config_node { + ($chain_source: expr, $anchor_channels: expr) => {{ + let config_a = random_config($anchor_channels); + let node = setup_node(&$chain_source, config_a, None); + node + }}; + } + let anchor_channels = false; + let nodes = vec![ + config_node!(chain_source_electrsd, anchor_channels), + config_node!(chain_source_bitcoind, anchor_channels), + config_node!(chain_source_esplora, anchor_channels), + ]; + + let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client); + premine_blocks(bitcoind, electrs); + + // Helpers declaration before starting the test + let all_addrs = + nodes.iter().map(|node| node.onchain_payment().new_address().unwrap()).collect::>(); + let amount_sat = 2_100_000; + let mut txid; + macro_rules! distribute_funds_all_nodes { + () => { + txid = distribute_funds_unconfirmed( + bitcoind, + electrs, + all_addrs.clone(), + Amount::from_sat(amount_sat), + ); + }; + } + macro_rules! 
validate_balances { + ($expected_balance_sat: expr, $is_spendable: expr) => { + let spend_balance = if $is_spendable { $expected_balance_sat } else { 0 }; + for node in &nodes { + node.sync_wallets().unwrap(); + let balances = node.list_balances(); + assert_eq!(balances.spendable_onchain_balance_sats, spend_balance); + assert_eq!(balances.total_onchain_balance_sats, $expected_balance_sat); + } + }; + } + + let scripts_buf: HashSet = + all_addrs.iter().map(|addr| addr.script_pubkey()).collect(); + let mut tx; + let mut fee_output_index; + + // Modify the output to the nodes + distribute_funds_all_nodes!(); + validate_balances!(amount_sat, false); + (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf); + tx.output.iter_mut().for_each(|output| { + if scripts_buf.contains(&output.script_pubkey) { + let new_addr = bitcoind.new_address().unwrap(); + output.script_pubkey = new_addr.script_pubkey(); + } + }); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + validate_balances!(0, is_insert_block); + + // Not modifying the output scripts, but still bumping the fee. 
+ distribute_funds_all_nodes!(); + validate_balances!(amount_sat, false); + (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + validate_balances!(amount_sat, is_insert_block); + + let mut final_amount_sat = amount_sat * 2; + let value_sat = 21_000; + + // Increase the value of the nodes' outputs + distribute_funds_all_nodes!(); + (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf); + tx.output.iter_mut().for_each(|output| { + if scripts_buf.contains(&output.script_pubkey) { + output.value = Amount::from_sat(output.value.to_sat() + value_sat); + } + }); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + final_amount_sat += value_sat; + validate_balances!(final_amount_sat, is_insert_block); + + // Decreases the value of the nodes' outputs + distribute_funds_all_nodes!(); + final_amount_sat += amount_sat; + (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf); + tx.output.iter_mut().for_each(|output| { + if scripts_buf.contains(&output.script_pubkey) { + output.value = Amount::from_sat(output.value.to_sat() - value_sat); + } + }); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + final_amount_sat -= value_sat; + validate_balances!(final_amount_sat, is_insert_block); + + if !is_insert_block { + generate_blocks_and_wait(bitcoind, electrs, 1); + validate_balances!(final_amount_sat, true); + } + + // Check if it is possible to send all funds from the node + let mut txids = Vec::new(); + let addr = bitcoind.new_address().unwrap(); + nodes.iter().for_each(|node| { + let txid = node.onchain_payment().send_all_to_address(&addr, true, None).unwrap(); + txids.push(txid); + }); + txids.iter().for_each(|txid| { + wait_for_tx(electrs, *txid); + }); + generate_blocks_and_wait(bitcoind, electrs, 6); + validate_balances!(0, true); +} + #[test] fn sign_verify_msg() { let (_bitcoind, 
electrsd) = setup_bitcoind_and_electrsd(); From b7550709fc191853d05658a05529b1073d71d4fb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 20 Aug 2025 09:14:20 +0200 Subject: [PATCH 063/184] Introduce separate stop signal for background processor Previously, we'd use the same stop signal for the background processor as for all other background tasks which could result in the BP getting stopped while other tasks are still produced changes that needed to be processed before shutdown. Here we introduce a separate stop signal for LDK's background processor, ensuring we first shutdown everything else and disconnect all peers before finally sending the BP shutdown signal and awaiting its shutdown. --- src/builder.rs | 2 ++ src/lib.rs | 32 +++++++++++++++++++++++++------- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 7f15cced6..0ef3434d6 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1668,11 +1668,13 @@ fn build_with_store_internal( }; let (stop_sender, _) = tokio::sync::watch::channel(()); + let (background_processor_stop_sender, _) = tokio::sync::watch::channel(()); let is_running = Arc::new(RwLock::new(false)); Ok(Node { runtime, stop_sender, + background_processor_stop_sender, config, wallet, chain_source, diff --git a/src/lib.rs b/src/lib.rs index 1604d1b46..e77a2f8a6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -180,6 +180,7 @@ uniffi::include_scaffolding!("ldk_node"); pub struct Node { runtime: Arc, stop_sender: tokio::sync::watch::Sender<()>, + background_processor_stop_sender: tokio::sync::watch::Sender<()>, config: Arc, wallet: Arc, chain_source: Arc, @@ -525,7 +526,7 @@ impl Node { let background_logger = Arc::clone(&self.logger); let background_error_logger = Arc::clone(&self.logger); let background_scorer = Arc::clone(&self.scorer); - let stop_bp = self.stop_sender.subscribe(); + let stop_bp = self.background_processor_stop_sender.subscribe(); let sleeper_logger = Arc::clone(&self.logger); let 
sleeper = move |d| { let mut stop = stop_bp.clone(); @@ -607,18 +608,20 @@ impl Node { // Stop any runtime-dependant chain sources. self.chain_source.stop(); - // Stop the runtime. - match self.stop_sender.send(()) { - Ok(_) => log_trace!(self.logger, "Sent shutdown signal to background tasks."), - Err(e) => { + // Stop background tasks. + self.stop_sender + .send(()) + .map(|_| { + log_trace!(self.logger, "Sent shutdown signal to background tasks."); + }) + .unwrap_or_else(|e| { log_error!( self.logger, "Failed to send shutdown signal. This should never happen: {}", e ); debug_assert!(false); - }, - } + }); // Cancel cancellable background tasks self.runtime.abort_cancellable_background_tasks(); @@ -634,6 +637,21 @@ impl Node { // Wait until non-cancellable background tasks (mod LDK's background processor) are done. self.runtime.wait_on_background_tasks(); + // Stop the background processor. + self.background_processor_stop_sender + .send(()) + .map(|_| { + log_trace!(self.logger, "Sent shutdown signal to background processor."); + }) + .unwrap_or_else(|e| { + log_error!( + self.logger, + "Failed to send shutdown signal. This should never happen: {}", + e + ); + debug_assert!(false); + }); + // Finally, wait until background processing stopped, at least until a timeout is reached. self.runtime.wait_on_background_processor_task(); From c428c4ca8a16cbf0ed377560b7f0bdbff4b1742c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 20 Aug 2025 09:17:43 +0200 Subject: [PATCH 064/184] Remove duplicate call to `chain_source.stop()` This was introduced during a rebase. Here we simply drop the redundant call. --- src/lib.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index e77a2f8a6..da86fce73 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -605,9 +605,6 @@ impl Node { log_info!(self.logger, "Shutting down LDK Node with node ID {}...", self.node_id()); - // Stop any runtime-dependant chain sources. 
- self.chain_source.stop(); - // Stop background tasks. self.stop_sender .send(()) @@ -630,13 +627,13 @@ impl Node { self.peer_manager.disconnect_all_peers(); log_debug!(self.logger, "Disconnected all network peers."); + // Wait until non-cancellable background tasks (mod LDK's background processor) are done. + self.runtime.wait_on_background_tasks(); + // Stop any runtime-dependant chain sources. self.chain_source.stop(); log_debug!(self.logger, "Stopped chain sources."); - // Wait until non-cancellable background tasks (mod LDK's background processor) are done. - self.runtime.wait_on_background_tasks(); - // Stop the background processor. self.background_processor_stop_sender .send(()) From ab3d78d1ecd05a755c836915284e5ca60c65692a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 22 May 2025 11:17:24 +0200 Subject: [PATCH 065/184] Use `crate::runtime::Runtime` for `VssStore` .. which now gives us cleaner reuse/handling of outer runtime contexts, cleanup on `Drop`, etc. --- src/builder.rs | 5 +-- src/io/vss_store.rs | 107 +++++++++++++++++++++++++------------------- 2 files changed, 61 insertions(+), 51 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 7f15cced6..02ad77ea2 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -694,10 +694,7 @@ impl NodeBuilder { let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); let vss_store = - VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| { - log_error!(logger, "Failed to setup VssStore: {}", e); - BuildError::KVStoreSetupFailed - })?; + VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider, Arc::clone(&runtime)); build_with_store_internal( config, self.chain_data_source_config.as_ref(), diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 296eaabe3..e2cfc3c7b 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -6,6 +6,8 @@ // accordance with one or both of these licenses. 
use crate::io::utils::check_namespace_key_validity; +use crate::runtime::Runtime; + use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::KVStore; @@ -15,7 +17,6 @@ use rand::RngCore; use std::panic::RefUnwindSafe; use std::sync::Arc; use std::time::Duration; -use tokio::runtime::Runtime; use vss_client::client::VssClient; use vss_client::error::VssError; use vss_client::headers::VssHeaderProvider; @@ -41,7 +42,7 @@ type CustomRetryPolicy = FilteredRetryPolicy< pub struct VssStore { client: VssClient, store_id: String, - runtime: Runtime, + runtime: Arc, storable_builder: StorableBuilder, key_obfuscator: KeyObfuscator, } @@ -49,9 +50,8 @@ pub struct VssStore { impl VssStore { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], - header_provider: Arc, - ) -> io::Result { - let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build()?; + header_provider: Arc, runtime: Arc, + ) -> Self { let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); @@ -70,7 +70,7 @@ impl VssStore { }) as _); let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); - Ok(Self { client, store_id, runtime, storable_builder, key_obfuscator }) + Self { client, store_id, runtime, storable_builder, key_obfuscator } } fn build_key( @@ -136,19 +136,16 @@ impl KVStore for VssStore { store_id: self.store_id.clone(), key: self.build_key(primary_namespace, secondary_namespace, key)?, }; - - let resp = - tokio::task::block_in_place(|| self.runtime.block_on(self.client.get_object(&request))) - .map_err(|e| { - let msg = format!( - "Failed to read from key {}/{}/{}: {}", - primary_namespace, secondary_namespace, key, e - ); - match e { - VssError::NoSuchKeyError(..) 
=> Error::new(ErrorKind::NotFound, msg), - _ => Error::new(ErrorKind::Other, msg), - } - })?; + let resp = self.runtime.block_on(self.client.get_object(&request)).map_err(|e| { + let msg = format!( + "Failed to read from key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + match e { + VssError::NoSuchKeyError(..) => Error::new(ErrorKind::NotFound, msg), + _ => Error::new(ErrorKind::Other, msg), + } + })?; // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { @@ -179,14 +176,13 @@ impl KVStore for VssStore { delete_items: vec![], }; - tokio::task::block_in_place(|| self.runtime.block_on(self.client.put_object(&request))) - .map_err(|e| { - let msg = format!( - "Failed to write to key {}/{}/{}: {}", - primary_namespace, secondary_namespace, key, e - ); - Error::new(ErrorKind::Other, msg) - })?; + self.runtime.block_on(self.client.put_object(&request)).map_err(|e| { + let msg = format!( + "Failed to write to key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + Error::new(ErrorKind::Other, msg) + })?; Ok(()) } @@ -204,30 +200,29 @@ impl KVStore for VssStore { }), }; - tokio::task::block_in_place(|| self.runtime.block_on(self.client.delete_object(&request))) - .map_err(|e| { - let msg = format!( - "Failed to delete key {}/{}/{}: {}", - primary_namespace, secondary_namespace, key, e - ); - Error::new(ErrorKind::Other, msg) - })?; + self.runtime.block_on(self.client.delete_object(&request)).map_err(|e| { + let msg = format!( + "Failed to delete key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + Error::new(ErrorKind::Other, msg) + })?; Ok(()) } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { check_namespace_key_validity(primary_namespace, 
secondary_namespace, None, "list")?; - let keys = tokio::task::block_in_place(|| { - self.runtime.block_on(self.list_all_keys(primary_namespace, secondary_namespace)) - }) - .map_err(|e| { - let msg = format!( - "Failed to retrieve keys in namespace: {}/{} : {}", - primary_namespace, secondary_namespace, e - ); - Error::new(ErrorKind::Other, msg) - })?; + let keys = self + .runtime + .block_on(self.list_all_keys(primary_namespace, secondary_namespace)) + .map_err(|e| { + let msg = format!( + "Failed to retrieve keys in namespace: {}/{} : {}", + primary_namespace, secondary_namespace, e + ); + Error::new(ErrorKind::Other, msg) + })?; Ok(keys) } @@ -266,10 +261,27 @@ mod tests { use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng, RngCore}; use std::collections::HashMap; + use tokio::runtime; use vss_client::headers::FixedHeaders; #[test] - fn read_write_remove_list_persist() { + fn vss_read_write_remove_list_persist() { + let runtime = Arc::new(Runtime::new().unwrap()); + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + let mut rng = thread_rng(); + let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); + let mut vss_seed = [0u8; 32]; + rng.fill_bytes(&mut vss_seed); + let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime).unwrap(); + + do_read_write_remove_list_persist(&vss_store); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn vss_read_write_remove_list_persist_in_runtime_context() { + let runtime = Arc::new(Runtime::new().unwrap()); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); let mut rng = thread_rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); @@ -277,8 +289,9 @@ mod tests { rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); let vss_store = - 
VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime).unwrap(); do_read_write_remove_list_persist(&vss_store); + drop(vss_store) } } From 24e6947be9a4b66d85a0bb8125ee7863678484e4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 21 Aug 2025 11:53:15 +0200 Subject: [PATCH 066/184] Require 16kb pages sizes for Android builds Google announced that starting Nov 2025 all apps in the Play Store will be required to be compatible with 16KB page sizes (cf. https://developer.android.com/guide/practices/page-sizes). Here we update our linker flags to ensure the generated shared libraries meet that requirement. --- scripts/uniffi_bindgen_generate_kotlin_android.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/uniffi_bindgen_generate_kotlin_android.sh b/scripts/uniffi_bindgen_generate_kotlin_android.sh index 142e5f75d..c87db5924 100755 --- a/scripts/uniffi_bindgen_generate_kotlin_android.sh +++ b/scripts/uniffi_bindgen_generate_kotlin_android.sh @@ -35,9 +35,9 @@ case "$OSTYPE" in PATH="$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/$LLVM_ARCH_PATH/bin:$PATH" rustup target add x86_64-linux-android aarch64-linux-android armv7-linux-androideabi -CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="x86_64-linux-android21-clang" CC="x86_64-linux-android21-clang" cargo build --profile release-smaller --features uniffi --target x86_64-linux-android || exit 1 -CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER="armv7a-linux-androideabi21-clang" CC="armv7a-linux-androideabi21-clang" cargo build --profile release-smaller --features uniffi --target armv7-linux-androideabi || exit 1 -CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="aarch64-linux-android21-clang" CC="aarch64-linux-android21-clang" cargo build 
--profile release-smaller --features uniffi --target aarch64-linux-android || exit 1 +RUSTFLAGS="-C link-args=-Wl,-z,max-page-size=16384,-z,common-page-size=16384" CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="x86_64-linux-android21-clang" CC="x86_64-linux-android21-clang" cargo build --profile release-smaller --features uniffi --target x86_64-linux-android || exit 1 +RUSTFLAGS="-C link-args=-Wl,-z,max-page-size=16384,-z,common-page-size=16384" CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER="armv7a-linux-androideabi21-clang" CC="armv7a-linux-androideabi21-clang" cargo build --profile release-smaller --features uniffi --target armv7-linux-androideabi || exit 1 +RUSTFLAGS="-C link-args=-Wl,-z,max-page-size=16384,-z,common-page-size=16384" CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="aarch64-linux-android21-clang" CC="aarch64-linux-android21-clang" cargo build --profile release-smaller --features uniffi --target aarch64-linux-android || exit 1 $UNIFFI_BINDGEN_BIN generate bindings/ldk_node.udl --language kotlin --config uniffi-android.toml -o "$BINDINGS_DIR"/"$PROJECT_DIR"/lib/src/main/kotlin || exit 1 JNI_LIB_DIR="$BINDINGS_DIR"/"$PROJECT_DIR"/lib/src/main/jniLibs/ From 02208abdd6eb33e8f59cfada192ef007c24389e1 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 2 Sep 2025 17:01:43 +0200 Subject: [PATCH 067/184] Relax `MaximumFeeEstimate` confirmation target estimation `MaximumFeeEstimate` is mostly used for protection against fee-inflation attacks. As users were previously impacted by this limit being too restrictive (read: too low), we bump it here a bit to give them some leeway. 
--- src/fee_estimator.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index f000245aa..f8ddcd5fd 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -150,6 +150,17 @@ pub(crate) fn apply_post_estimation_adjustments( .max(FEERATE_FLOOR_SATS_PER_KW as u64); FeeRate::from_sat_per_kwu(slightly_less_than_background) }, + ConfirmationTarget::Lightning(LdkConfirmationTarget::MaximumFeeEstimate) => { + // MaximumFeeEstimate is mostly used for protection against fee-inflation attacks. As + // users were previously impacted by this limit being too restrictive (read: too low), + // we bump it here a bit to give them some leeway. + let slightly_bump = estimated_rate + .to_sat_per_kwu() + .saturating_mul(11) + .saturating_div(10) + .saturating_add(2500); + FeeRate::from_sat_per_kwu(slightly_bump) + }, _ => estimated_rate, } } From 66b1a883f48d70faecd946cc522d803c21b811d7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 31 Jul 2025 08:54:54 +0200 Subject: [PATCH 068/184] Stop migrating spendable outputs from pre-v0.3 format As part of the version v0.3 release we switched to the upstreamed `OutputSweeper` which slightly changed our serialization format, having us run a migration step on startup for backwards compatibility ever since. Here we drop the migration code running on startup, for simplicity's sake, but also because it's going to be async going forward and we currently don't have a runtime available on startup (which might change soon, but still). As the v0.3 release now well over a year ago, it's very unlikely that there are any v0.2 (or even v0.3) users left. If there are any affected users left, they'll first have to upgrade to any version pre-v0.7, startup, and then upgrade to v0.7 or later. 
--- src/balance.rs | 13 +++++-- src/builder.rs | 16 +------- src/io/mod.rs | 5 --- src/io/utils.rs | 100 +----------------------------------------------- src/lib.rs | 1 - src/sweep.rs | 47 ----------------------- 6 files changed, 12 insertions(+), 170 deletions(-) delete mode 100644 src/sweep.rs diff --git a/src/balance.rs b/src/balance.rs index b5e2f5eb7..d0ebc310b 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -5,17 +5,16 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::sweep::value_from_descriptor; - use lightning::chain::channelmonitor::Balance as LdkBalance; use lightning::chain::channelmonitor::BalanceSource; use lightning::ln::types::ChannelId; +use lightning::sign::SpendableOutputDescriptor; use lightning::util::sweep::{OutputSpendStatus, TrackedSpendableOutput}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; use bitcoin::secp256k1::PublicKey; -use bitcoin::{BlockHash, Txid}; +use bitcoin::{Amount, BlockHash, Txid}; /// Details of the known available balances returned by [`Node::list_balances`]. /// @@ -385,3 +384,11 @@ impl PendingSweepBalance { } } } + +fn value_from_descriptor(descriptor: &SpendableOutputDescriptor) -> Amount { + match &descriptor { + SpendableOutputDescriptor::StaticOutput { output, .. 
} => output.value, + SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.output.value, + SpendableOutputDescriptor::StaticPaymentOutput(output) => output.output.value, + } +} diff --git a/src/builder.rs b/src/builder.rs index e160d1f6e..289c2954c 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -25,7 +25,7 @@ use crate::io::{ use crate::liquidity::{ LSPS1ClientConfig, LSPS2ClientConfig, LSPS2ServiceConfig, LiquiditySourceBuilder, }; -use crate::logger::{log_error, log_info, LdkLogger, LogLevel, LogWriter, Logger}; +use crate::logger::{log_error, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; use crate::peer_store::PeerStore; use crate::runtime::Runtime; @@ -1627,20 +1627,6 @@ fn build_with_store_internal( }, }; - match io::utils::migrate_deprecated_spendable_outputs( - Arc::clone(&output_sweeper), - Arc::clone(&kv_store), - Arc::clone(&logger), - ) { - Ok(()) => { - log_info!(logger, "Successfully migrated OutputSweeper data."); - }, - Err(e) => { - log_error!(logger, "Failed to migrate OutputSweeper data: {}", e); - return Err(BuildError::ReadFailed); - }, - } - let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)) { Ok(event_queue) => Arc::new(event_queue), diff --git a/src/io/mod.rs b/src/io/mod.rs index 3192dbb86..7a52a5c98 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -27,11 +27,6 @@ pub(crate) const PEER_INFO_PERSISTENCE_KEY: &str = "peers"; pub(crate) const PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str = "payments"; pub(crate) const PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; -/// The spendable output information used to persisted under this prefix until LDK Node v0.3.0. -pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str = - "spendable_outputs"; -pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; - /// The node metrics will be persisted under this key. 
pub(crate) const NODE_METRICS_PRIMARY_NAMESPACE: &str = ""; pub(crate) const NODE_METRICS_SECONDARY_NAMESPACE: &str = ""; diff --git a/src/io/utils.rs b/src/io/utils.rs index b5537ed7d..06a1017ba 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -15,7 +15,6 @@ use crate::io::{ }; use crate::logger::{log_error, LdkLogger, Logger}; use crate::peer_store::PeerStore; -use crate::sweep::DeprecatedSpendableOutputInfo; use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; @@ -33,7 +32,7 @@ use lightning::util::persist::{ }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; use lightning::util::string::PrintableString; -use lightning::util::sweep::{OutputSpendStatus, OutputSweeper}; +use lightning::util::sweep::OutputSweeper; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; @@ -258,103 +257,6 @@ pub(crate) fn read_output_sweeper( }) } -/// Read previously persisted spendable output information from the store and migrate to the -/// upstreamed `OutputSweeper`. -/// -/// We first iterate all `DeprecatedSpendableOutputInfo`s and have them tracked by the new -/// `OutputSweeper`. In order to be certain the initial output spends will happen in a single -/// transaction (and safe on-chain fees), we batch them to happen at current height plus two -/// blocks. Lastly, we remove the previously persisted data once we checked they are tracked and -/// awaiting their initial spend at the correct height. -/// -/// Note that this migration will be run in the `Builder`, i.e., at the time when the migration is -/// happening no background sync is ongoing, so we shouldn't have a risk of interleaving block -/// connections during the migration. 
-pub(crate) fn migrate_deprecated_spendable_outputs( - sweeper: Arc, kv_store: Arc, logger: L, -) -> Result<(), std::io::Error> -where - L::Target: LdkLogger, -{ - let best_block = sweeper.current_best_block(); - - for stored_key in kv_store.list( - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - )? { - let mut reader = Cursor::new(kv_store.read( - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - )?); - let output = DeprecatedSpendableOutputInfo::read(&mut reader).map_err(|e| { - log_error!(logger, "Failed to deserialize SpendableOutputInfo: {}", e); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to deserialize SpendableOutputInfo", - ) - })?; - let descriptors = vec![output.descriptor.clone()]; - let spend_delay = Some(best_block.height + 2); - sweeper - .track_spendable_outputs(descriptors, output.channel_id, true, spend_delay) - .map_err(|_| { - log_error!(logger, "Failed to track spendable outputs. Aborting migration, will retry in the future."); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to track spendable outputs. 
Aborting migration, will retry in the future.", - ) - })?; - - if let Some(tracked_spendable_output) = - sweeper.tracked_spendable_outputs().iter().find(|o| o.descriptor == output.descriptor) - { - match tracked_spendable_output.status { - OutputSpendStatus::PendingInitialBroadcast { delayed_until_height } => { - if delayed_until_height == spend_delay { - kv_store.remove( - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - false, - )?; - } else { - debug_assert!(false, "Unexpected status in OutputSweeper migration."); - log_error!(logger, "Unexpected status in OutputSweeper migration."); - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Failed to migrate OutputSweeper state.", - )); - } - }, - _ => { - debug_assert!(false, "Unexpected status in OutputSweeper migration."); - log_error!(logger, "Unexpected status in OutputSweeper migration."); - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Failed to migrate OutputSweeper state.", - )); - }, - } - } else { - debug_assert!( - false, - "OutputSweeper failed to track and persist outputs during migration." - ); - log_error!( - logger, - "OutputSweeper failed to track and persist outputs during migration." - ); - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Failed to migrate OutputSweeper state.", - )); - } - } - - Ok(()) -} - pub(crate) fn read_node_metrics( kv_store: Arc, logger: L, ) -> Result diff --git a/src/lib.rs b/src/lib.rs index da86fce73..9035d5361 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -95,7 +95,6 @@ mod message_handler; pub mod payment; mod peer_store; mod runtime; -mod sweep; mod tx_broadcaster; mod types; mod wallet; diff --git a/src/sweep.rs b/src/sweep.rs deleted file mode 100644 index ba10869b8..000000000 --- a/src/sweep.rs +++ /dev/null @@ -1,47 +0,0 @@ -// This file is Copyright its original authors, visible in version control history. 
-// -// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in -// accordance with one or both of these licenses. - -//! The output sweeper used to live here before we upstreamed it to `rust-lightning` and migrated -//! to the upstreamed version with LDK Node v0.3.0 (May 2024). We should drop this module entirely -//! once sufficient time has passed for us to be confident any users completed the migration. - -use lightning::impl_writeable_tlv_based; -use lightning::ln::types::ChannelId; -use lightning::sign::SpendableOutputDescriptor; - -use bitcoin::{Amount, BlockHash, Transaction}; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub(crate) struct DeprecatedSpendableOutputInfo { - pub(crate) id: [u8; 32], - pub(crate) descriptor: SpendableOutputDescriptor, - pub(crate) channel_id: Option, - pub(crate) first_broadcast_hash: Option, - pub(crate) latest_broadcast_height: Option, - pub(crate) latest_spending_tx: Option, - pub(crate) confirmation_height: Option, - pub(crate) confirmation_hash: Option, -} - -impl_writeable_tlv_based!(DeprecatedSpendableOutputInfo, { - (0, id, required), - (2, descriptor, required), - (4, channel_id, option), - (6, first_broadcast_hash, option), - (8, latest_broadcast_height, option), - (10, latest_spending_tx, option), - (12, confirmation_height, option), - (14, confirmation_hash, option), -}); - -pub(crate) fn value_from_descriptor(descriptor: &SpendableOutputDescriptor) -> Amount { - match &descriptor { - SpendableOutputDescriptor::StaticOutput { output, .. 
} => output.value, - SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.output.value, - SpendableOutputDescriptor::StaticPaymentOutput(output) => output.output.value, - } -} From 02c2663715041a6c04e0627f7095d981505d37af Mon Sep 17 00:00:00 2001 From: moisesPomilio <93723302+moisesPompilio@users.noreply.github.com> Date: Tue, 9 Sep 2025 10:24:53 -0300 Subject: [PATCH 069/184] Check last_best_block_hash before updating fee_rate to avoid unnecessary updates Close #520 --- src/chain/bitcoind.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index c282a6141..a808f6294 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -261,6 +261,7 @@ impl BitcoindChainSource { log_info!(self.logger, "Starting continuous polling for chain updates."); // Start the polling loop. + let mut last_best_block_hash = None; loop { tokio::select! { _ = stop_sync_receiver.changed() => { @@ -278,7 +279,12 @@ impl BitcoindChainSource { ).await; } _ = fee_rate_update_interval.tick() => { - let _ = self.update_fee_rate_estimates().await; + if last_best_block_hash != Some(channel_manager.current_best_block().block_hash) { + let update_res = self.update_fee_rate_estimates().await; + if update_res.is_ok() { + last_best_block_hash = Some(channel_manager.current_best_block().block_hash); + } + } } } } From ed12e65591813d22da5be51b0af8d92e8e7ba5cd Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 6 Feb 2025 16:14:34 +0100 Subject: [PATCH 070/184] Bump to LDK main (4e32d852) We bump our LDK dependency to 0.2-devel, up to commit `4e32d85249359d8ef8ece97d89848e40154363ab`. 
--- Cargo.toml | 51 +++-- bindings/ldk_node.udl | 81 ++++---- src/builder.rs | 49 +++-- src/chain/bitcoind.rs | 12 +- src/chain/electrum.rs | 9 +- src/chain/esplora.rs | 15 +- src/config.rs | 12 +- src/data_store.rs | 22 +-- src/event.rs | 82 +++++--- src/ffi/types.rs | 94 +++++++-- src/io/sqlite_store/migrations.rs | 2 +- src/io/sqlite_store/mod.rs | 11 +- src/io/test_utils.rs | 24 +-- src/io/utils.rs | 9 +- src/io/vss_store.rs | 10 +- src/lib.rs | 65 +++---- src/liquidity.rs | 154 +++++---------- src/message_handler.rs | 3 +- src/payment/bolt11.rs | 189 +++++++++--------- src/payment/bolt12.rs | 55 +++--- src/payment/mod.rs | 84 -------- src/payment/spontaneous.rs | 58 +++--- src/payment/store.rs | 2 +- src/peer_store.rs | 2 +- src/types.rs | 29 +-- src/wallet/mod.rs | 311 ++++++++++++++---------------- tests/common/mod.rs | 14 +- tests/integration_tests_rust.rs | 19 +- 28 files changed, 690 insertions(+), 778 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 96a9eea53..aaaa55f39 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,38 +28,53 @@ panic = 'abort' # Abort on panic default = [] [dependencies] -lightning = { version = "0.1.0", features = ["std"] } -lightning-types = { version = "0.2.0" } -lightning-invoice = { version = "0.33.0", features = ["std"] } -lightning-net-tokio = { version = "0.1.0" } -lightning-persister = { version = "0.1.0" } -lightning-background-processor = { version = "0.1.0", features = ["futures"] } -lightning-rapid-gossip-sync = { version = "0.1.0" } -lightning-block-sync = { version = "0.1.0", features = ["rpc-client", "rest-client", "tokio"] } -lightning-transaction-sync = { version = "0.1.0", features = ["esplora-async-https", "time", "electrum"] } -lightning-liquidity = { version = "0.1.0", features = ["std"] } +#lightning = { version = "0.1.0", features = ["std"] } +#lightning-types = { version = "0.2.0" } +#lightning-invoice = { version = "0.33.0", features = ["std"] } +#lightning-net-tokio = { version = "0.1.0" } 
+#lightning-persister = { version = "0.1.0" } +#lightning-background-processor = { version = "0.1.0" } +#lightning-rapid-gossip-sync = { version = "0.1.0" } +#lightning-block-sync = { version = "0.1.0", features = ["rest-client", "rpc-client", "tokio"] } +#lightning-transaction-sync = { version = "0.1.0", features = ["esplora-async-https", "time", "electrum"] } +#lightning-liquidity = { version = "0.1.0", features = ["std"] } +#lightning-macros = { version = "0.1.0" } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } #lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["futures"] } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["rpc-client", "tokio"] } +#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["rest-client", "rpc-client", "tokio"] } #lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["esplora-async-https", "electrum", "time"] } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-macros = { git = 
"https://github.com/lightningdevkit/rust-lightning", branch = "main" } + +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["esplora-async-https", "electrum", "time"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } #lightning = { path = "../rust-lightning/lightning", features = ["std"] } #lightning-types = { path = "../rust-lightning/lightning-types" } #lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } #lightning-net-tokio = { path = 
"../rust-lightning/lightning-net-tokio" } #lightning-persister = { path = "../rust-lightning/lightning-persister" } -#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor", features = ["futures"] } +#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } #lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } -#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rpc-client", "tokio"] } +#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } #lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum", "time"] } #lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } +#lightning-macros = { path = "../rust-lightning/lightning-macros" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,11 +93,6 @@ rand = "0.8.5" chrono = { version = "0.4", default-features = false, features = ["clock"] } tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } esplora-client = { version = "0.12", default-features = false, features = ["tokio", "async-https-rustls"] } - -# FIXME: This was introduced to decouple the `bdk_esplora` and -# `lightning-transaction-sync` APIs. We should drop it as part of the upgrade -# to LDK 0.2. 
-esplora-client_0_11 = { package = "esplora-client", version = "0.11", default-features = false, features = ["tokio", "async-https-rustls"] } electrum-client = { version = "0.24.0", default-features = true } libc = "0.2" uniffi = { version = "0.28.3", features = ["build"], optional = true } @@ -97,8 +107,9 @@ prost = { version = "0.11.6", default-features = false} winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.1.0", features = ["std", "_test_utils"] } +#lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 076d7fc9b..b9bab61e8 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -12,7 +12,7 @@ dictionary Config { sequence trusted_peers_0conf; u64 probing_liquidity_limit_multiplier; AnchorChannelsConfig? anchor_channels_config; - SendingParameters? sending_parameters; + RouteParametersConfig? route_parameters; }; dictionary AnchorChannelsConfig { @@ -167,13 +167,13 @@ interface Bolt11InvoiceDescription { interface Bolt11Payment { [Throws=NodeError] - PaymentId send([ByRef]Bolt11Invoice invoice, SendingParameters? sending_parameters); + PaymentId send([ByRef]Bolt11Invoice invoice, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, SendingParameters? sending_parameters); + PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, RouteParametersConfig? 
route_parameters); [Throws=NodeError] - void send_probes([ByRef]Bolt11Invoice invoice); + void send_probes([ByRef]Bolt11Invoice invoice, RouteParametersConfig? route_parameters); [Throws=NodeError] - void send_probes_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat); + void send_probes_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, RouteParametersConfig? route_parameters); [Throws=NodeError] void claim_for_hash(PaymentHash payment_hash, u64 claimable_amount_msat, PaymentPreimage preimage); [Throws=NodeError] @@ -213,13 +213,13 @@ interface Bolt12Payment { interface SpontaneousPayment { [Throws=NodeError] - PaymentId send(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters); + PaymentId send(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters, sequence custom_tlvs); + PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters, sequence custom_tlvs); [Throws=NodeError] - PaymentId send_with_preimage(u64 amount_msat, PublicKey node_id, PaymentPreimage preimage, SendingParameters? sending_parameters); + PaymentId send_with_preimage(u64 amount_msat, PublicKey node_id, PaymentPreimage preimage, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_with_preimage_and_custom_tlvs(u64 amount_msat, PublicKey node_id, sequence custom_tlvs, PaymentPreimage preimage, SendingParameters? sending_parameters); + PaymentId send_with_preimage_and_custom_tlvs(u64 amount_msat, PublicKey node_id, sequence custom_tlvs, PaymentPreimage preimage, RouteParametersConfig? 
route_parameters); [Throws=NodeError] void send_probes(u64 amount_msat, PublicKey node_id); }; @@ -254,7 +254,7 @@ interface LSPS1Liquidity { [Throws=NodeError] LSPS1OrderStatus request_channel(u64 lsp_balance_sat, u64 client_balance_sat, u32 channel_expiry_blocks, boolean announce_channel); [Throws=NodeError] - LSPS1OrderStatus check_order_status(OrderId order_id); + LSPS1OrderStatus check_order_status(LSPS1OrderId order_id); }; [Error] @@ -392,7 +392,7 @@ enum PaymentFailureReason { [Enum] interface ClosureReason { CounterpartyForceClosed(UntrustedString peer_msg); - HolderForceClosed(boolean? broadcasted_latest_txn); + HolderForceClosed(boolean? broadcasted_latest_txn, string message); LegacyCooperativeClosure(); CounterpartyInitiatedCooperativeClosure(); LocallyInitiatedCooperativeClosure(); @@ -402,8 +402,9 @@ interface ClosureReason { DisconnectedPeer(); OutdatedChannelManager(); CounterpartyCoopClosedUnfundedChannel(); + LocallyCoopClosedUnfundedChannel(); FundingBatchClosure(); - HTLCsTimedOut(); + HTLCsTimedOut( PaymentHash? payment_hash ); PeerFeerateTooLow(u32 peer_feerate_sat_per_kw, u32 required_feerate_sat_per_kw); }; @@ -456,11 +457,11 @@ dictionary PaymentDetails { u64 latest_update_timestamp; }; -dictionary SendingParameters { - MaxTotalRoutingFeeLimit? max_total_routing_fee_msat; - u32? max_total_cltv_expiry_delta; - u8? max_path_count; - u8? max_channel_saturation_power_of_half; +dictionary RouteParametersConfig { + u64? max_total_routing_fee_msat; + u32 max_total_cltv_expiry_delta; + u8 max_path_count; + u8 max_channel_saturation_power_of_half; }; dictionary CustomTlvRecord { @@ -469,13 +470,13 @@ dictionary CustomTlvRecord { }; dictionary LSPS1OrderStatus { - OrderId order_id; - OrderParameters order_params; - PaymentInfo payment_options; - ChannelOrderInfo? channel_state; + LSPS1OrderId order_id; + LSPS1OrderParams order_params; + LSPS1PaymentInfo payment_options; + LSPS1ChannelInfo? 
channel_state; }; -dictionary OrderParameters { +dictionary LSPS1OrderParams { u64 lsp_balance_sat; u64 client_balance_sat; u16 required_channel_confirmations; @@ -485,22 +486,22 @@ dictionary OrderParameters { boolean announce_channel; }; -dictionary PaymentInfo { - Bolt11PaymentInfo? bolt11; - OnchainPaymentInfo? onchain; +dictionary LSPS1PaymentInfo { + LSPS1Bolt11PaymentInfo? bolt11; + LSPS1OnchainPaymentInfo? onchain; }; -dictionary Bolt11PaymentInfo { - PaymentState state; - DateTime expires_at; +dictionary LSPS1Bolt11PaymentInfo { + LSPS1PaymentState state; + LSPSDateTime expires_at; u64 fee_total_sat; u64 order_total_sat; Bolt11Invoice invoice; }; -dictionary OnchainPaymentInfo { - PaymentState state; - DateTime expires_at; +dictionary LSPS1OnchainPaymentInfo { + LSPS1PaymentState state; + LSPSDateTime expires_at; u64 fee_total_sat; u64 order_total_sat; Address address; @@ -509,24 +510,18 @@ dictionary OnchainPaymentInfo { Address? refund_onchain_address; }; -dictionary ChannelOrderInfo { - DateTime funded_at; +dictionary LSPS1ChannelInfo { + LSPSDateTime funded_at; OutPoint funding_outpoint; - DateTime expires_at; + LSPSDateTime expires_at; }; -enum PaymentState { +enum LSPS1PaymentState { "ExpectPayment", "Paid", "Refunded", }; -[Enum] -interface MaxTotalRoutingFeeLimit { - None (); - Some ( u64 amount_msat ); -}; - [NonExhaustive] enum Network { "Bitcoin", @@ -861,7 +856,7 @@ typedef string UntrustedString; typedef string NodeAlias; [Custom] -typedef string OrderId; +typedef string LSPS1OrderId; [Custom] -typedef string DateTime; +typedef string LSPSDateTime; diff --git a/src/builder.rs b/src/builder.rs index 289c2954c..094c21e72 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -48,7 +48,7 @@ use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, }; -use lightning::sign::EntropySource; +use lightning::sign::{EntropySource, 
NodeSigner}; use lightning::util::persist::{ read_channel_monitors, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -173,17 +173,17 @@ pub enum BuildError { RuntimeSetupFailed, /// We failed to read data from the [`KVStore`]. /// - /// [`KVStore`]: lightning::util::persist::KVStore + /// [`KVStore`]: lightning::util::persist::KVStoreSync ReadFailed, /// We failed to write data to the [`KVStore`]. /// - /// [`KVStore`]: lightning::util::persist::KVStore + /// [`KVStore`]: lightning::util::persist::KVStoreSync WriteFailed, /// We failed to access the given `storage_dir_path`. StoragePathAccessFailed, /// We failed to setup our [`KVStore`]. /// - /// [`KVStore`]: lightning::util::persist::KVStore + /// [`KVStore`]: lightning::util::persist::KVStoreSync KVStoreSetupFailed, /// We failed to setup the onchain wallet. WalletSetupFailed, @@ -1275,15 +1275,6 @@ fn build_with_store_internal( }, }; - // Initialize the ChainMonitor - let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( - Some(Arc::clone(&chain_source)), - Arc::clone(&tx_broadcaster), - Arc::clone(&logger), - Arc::clone(&fee_estimator), - Arc::clone(&kv_store), - )); - // Initialize the KeysManager let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| { log_error!(logger, "Failed to get current time: {}", e); @@ -1299,6 +1290,19 @@ fn build_with_store_internal( Arc::clone(&logger), )); + let peer_storage_key = keys_manager.get_peer_storage_key(); + + // Initialize the ChainMonitor + let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( + Some(Arc::clone(&chain_source)), + Arc::clone(&tx_broadcaster), + Arc::clone(&logger), + Arc::clone(&fee_estimator), + Arc::clone(&kv_store), + Arc::clone(&keys_manager), + peer_storage_key, + )); + // Initialize the network graph, scorer, and router let network_graph = match io::utils::read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)) { @@ -1359,17 +1363,6 @@ fn build_with_store_internal( }; let mut user_config = 
default_user_config(&config); - if liquidity_source_config.and_then(|lsc| lsc.lsps2_client.as_ref()).is_some() { - // Generally allow claiming underpaying HTLCs as the LSP will skim off some fee. We'll - // check that they don't take too much before claiming. - user_config.channel_config.accept_underpaying_htlcs = true; - - // FIXME: When we're an LSPS2 client, set maximum allowed inbound HTLC value in flight - // to 100%. We should eventually be able to set this on a per-channel basis, but for - // now we just bump the default for all channels. - user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = - 100; - } if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() { // If we act as an LSPS2 service, we need to to be able to intercept HTLCs and forward the @@ -1447,8 +1440,8 @@ fn build_with_store_internal( // Give ChannelMonitors to ChainMonitor for (_blockhash, channel_monitor) in channel_monitors.into_iter() { - let funding_outpoint = channel_monitor.get_funding_txo().0; - chain_monitor.watch_channel(funding_outpoint, channel_monitor).map_err(|e| { + let channel_id = channel_monitor.channel_id(); + chain_monitor.watch_channel(channel_id, channel_monitor).map_err(|e| { log_error!(logger, "Failed to watch channel monitor: {:?}", e); BuildError::InvalidChannelMonitor })?; @@ -1560,6 +1553,7 @@ fn build_with_store_internal( as Arc, onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, + send_only_message_handler: Arc::clone(&chain_monitor), }, GossipSync::Rapid(_) => MessageHandler { chan_handler: Arc::clone(&channel_manager), @@ -1567,6 +1561,7 @@ fn build_with_store_internal( as Arc, onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, + send_only_message_handler: Arc::clone(&chain_monitor), }, GossipSync::None => { unreachable!("We must always have a gossip sync!"); @@ -1611,7 +1606,7 @@ fn build_with_store_internal( Ok(output_sweeper) => 
Arc::new(output_sweeper), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { - Arc::new(OutputSweeper::new( + Arc::new(OutputSweeper::new_with_kv_store_sync( channel_manager.current_best_block(), Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index c282a6141..7157e5a4f 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -173,7 +173,7 @@ impl BitcoindChainSource { if let Some(worst_channel_monitor_block_hash) = chain_monitor .list_monitors() .iter() - .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) + .flat_map(|channel_id| chain_monitor.get_monitor(*channel_id)) .map(|m| m.current_best_block()) .min_by_key(|b| b.height) .map(|b| b.block_hash) @@ -1381,11 +1381,11 @@ impl Listen for ChainListener { self.output_sweeper.block_connected(block, height); } - fn block_disconnected(&self, header: &bitcoin::block::Header, height: u32) { - self.onchain_wallet.block_disconnected(header, height); - self.channel_manager.block_disconnected(header, height); - self.chain_monitor.block_disconnected(header, height); - self.output_sweeper.block_disconnected(header, height); + fn blocks_disconnected(&self, fork_point_block: lightning::chain::BestBlock) { + self.onchain_wallet.blocks_disconnected(fork_point_block); + self.channel_manager.blocks_disconnected(fork_point_block); + self.chain_monitor.blocks_disconnected(fork_point_block); + self.output_sweeper.blocks_disconnected(fork_point_block); } } diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index b6d37409b..40d929ce7 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -402,7 +402,7 @@ impl ElectrumRuntimeStatus { struct ElectrumRuntimeClient { electrum_client: Arc, - bdk_electrum_client: Arc>, + bdk_electrum_client: Arc>>, tx_sync: Arc>>, runtime: Arc, config: Arc, @@ -424,12 +424,7 @@ impl ElectrumRuntimeClient { Error::ConnectionFailed })?, ); - let electrum_client_2 = - ElectrumClient::from_config(&server_url, 
electrum_config).map_err(|e| { - log_error!(logger, "Failed to connect to electrum server: {}", e); - Error::ConnectionFailed - })?; - let bdk_electrum_client = Arc::new(BdkElectrumClient::new(electrum_client_2)); + let bdk_electrum_client = Arc::new(BdkElectrumClient::new(Arc::clone(&electrum_client))); let tx_sync = Arc::new( ElectrumSyncClient::new(server_url.clone(), Arc::clone(&logger)).map_err(|e| { log_error!(logger, "Failed to connect to electrum server: {}", e); diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index a8806a413..8e9a4dbd4 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -57,19 +57,6 @@ impl EsploraChainSource { kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, ) -> Self { - // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating - // `lightning-transaction-sync`. We should revert this as part of of the upgrade to LDK 0.2. - let mut client_builder_0_11 = esplora_client_0_11::Builder::new(&server_url); - client_builder_0_11 = client_builder_0_11.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - - for (header_name, header_value) in &headers { - client_builder_0_11 = client_builder_0_11.header(header_name, header_value); - } - - let esplora_client_0_11 = client_builder_0_11.build_async().unwrap(); - let tx_sync = - Arc::new(EsploraSyncClient::from_client(esplora_client_0_11, Arc::clone(&logger))); - let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -78,6 +65,8 @@ impl EsploraChainSource { } let esplora_client = client_builder.build_async().unwrap(); + let tx_sync = + Arc::new(EsploraSyncClient::from_client(esplora_client.clone(), Arc::clone(&logger))); let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); diff --git a/src/config.rs b/src/config.rs index 02df8bbc7..84f62d220 100644 --- 
a/src/config.rs +++ b/src/config.rs @@ -8,10 +8,10 @@ //! Objects for configuring the node. use crate::logger::LogLevel; -use crate::payment::SendingParameters; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; +use lightning::routing::router::RouteParametersConfig; use lightning::util::config::ChannelConfig as LdkChannelConfig; use lightning::util::config::MaxDustHTLCExposure as LdkMaxDustHTLCExposure; use lightning::util::config::UserConfig; @@ -114,9 +114,9 @@ pub const WALLET_KEYS_SEED_LEN: usize = 64; /// | `probing_liquidity_limit_multiplier` | 3 | /// | `log_level` | Debug | /// | `anchor_channels_config` | Some(..) | -/// | `sending_parameters` | None | +/// | `route_parameters` | None | /// -/// See [`AnchorChannelsConfig`] and [`SendingParameters`] for more information regarding their +/// See [`AnchorChannelsConfig`] and [`RouteParametersConfig`] for more information regarding their /// respective default values. /// /// [`Node`]: crate::Node @@ -173,12 +173,12 @@ pub struct Config { pub anchor_channels_config: Option, /// Configuration options for payment routing and pathfinding. /// - /// Setting the `SendingParameters` provides flexibility to customize how payments are routed, + /// Setting the [`RouteParametersConfig`] provides flexibility to customize how payments are routed, /// including setting limits on routing fees, CLTV expiry, and channel utilization. /// /// **Note:** If unset, default parameters will be used, and you will be able to override the /// parameters on a per-payment basis in the corresponding method calls. 
- pub sending_parameters: Option, + pub route_parameters: Option, } impl Default for Config { @@ -191,7 +191,7 @@ impl Default for Config { trusted_peers_0conf: Vec::new(), probing_liquidity_limit_multiplier: DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER, anchor_channels_config: Some(AnchorChannelsConfig::default()), - sending_parameters: None, + route_parameters: None, node_alias: None, } } diff --git a/src/data_store.rs b/src/data_store.rs index 78e3e7870..45802c272 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -143,18 +143,18 @@ where let store_key = object.id().encode_to_hex_str(); let data = object.encode(); self.kv_store - .write(&self.primary_namespace, &self.secondary_namespace, &store_key, &data) + .write(&self.primary_namespace, &self.secondary_namespace, &store_key, data) .map_err(|e| { - log_error!( - self.logger, - "Write for key {}/{}/{} failed due to: {}", - &self.primary_namespace, - &self.secondary_namespace, - store_key, - e - ); - Error::PersistenceFailed - })?; + log_error!( + self.logger, + "Write for key {}/{}/{} failed due to: {}", + &self.primary_namespace, + &self.secondary_namespace, + store_key, + e + ); + Error::PersistenceFailed + })?; Ok(()) } } diff --git a/src/event.rs b/src/event.rs index ff94d51d1..bad1b84ab 100644 --- a/src/event.rs +++ b/src/event.rs @@ -38,6 +38,9 @@ use lightning::impl_writeable_tlv_based_enum; use lightning::ln::channelmanager::PaymentId; use lightning::ln::types::ChannelId; use lightning::routing::gossip::NodeId; +use lightning::util::config::{ + ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate, +}; use lightning::util::errors::APIError; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; @@ -56,7 +59,6 @@ use core::task::{Poll, Waker}; use std::collections::VecDeque; use std::ops::Deref; use std::sync::{Arc, Condvar, Mutex}; -use std::time::Duration; /// An event emitted by [`Node`], which should be handled by the user. 
/// @@ -358,7 +360,7 @@ where EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY, - &data, + data, ) .map_err(|e| { log_error!( @@ -544,7 +546,7 @@ where Err(err) => { log_error!(self.logger, "Failed to create funding transaction: {}", err); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Failed to create funding transaction".to_string(), @@ -565,13 +567,10 @@ where payment_hash, purpose, amount_msat, - receiver_node_id: _, - via_channel_id: _, - via_user_channel_id: _, claim_deadline, onion_fields, counterparty_skimmed_fee_msat, - payment_id: _, + .. } => { let payment_id = PaymentId(payment_hash.0); if let Some(info) = self.payment_store.get(&payment_id) { @@ -1043,26 +1042,17 @@ where LdkEvent::PaymentPathFailed { .. } => {}, LdkEvent::ProbeSuccessful { .. } => {}, LdkEvent::ProbeFailed { .. } => {}, - LdkEvent::HTLCHandlingFailed { failed_next_destination, .. } => { + LdkEvent::HTLCHandlingFailed { failure_type, .. 
} => { if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_htlc_handling_failed(failed_next_destination); + liquidity_source.handle_htlc_handling_failed(failure_type); } }, - LdkEvent::PendingHTLCsForwardable { time_forwardable } => { - let forwarding_channel_manager = self.channel_manager.clone(); - let min = time_forwardable.as_millis() as u64; - - let future = async move { - let millis_to_sleep = thread_rng().gen_range(min..min * 5) as u64; - tokio::time::sleep(Duration::from_millis(millis_to_sleep)).await; - - forwarding_channel_manager.process_pending_htlc_forwards(); - }; - - self.runtime.spawn_cancellable_background_task(future); - }, LdkEvent::SpendableOutputs { outputs, channel_id } => { - match self.output_sweeper.track_spendable_outputs(outputs, channel_id, true, None) { + match self + .output_sweeper + .track_spendable_outputs(outputs, channel_id, true, None) + .await + { Ok(_) => return Ok(()), Err(_) => { log_error!(self.logger, "Failed to track spendable outputs"); @@ -1084,7 +1074,7 @@ where log_error!(self.logger, "Rejecting inbound announced channel from peer {} due to missing configuration: {}", counterparty_node_id, err); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Channel request rejected".to_string(), @@ -1128,7 +1118,7 @@ where required_amount_sats, ); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Channel request rejected".to_string(), @@ -1145,7 +1135,7 @@ where counterparty_node_id, ); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Channel request rejected".to_string(), @@ -1157,19 +1147,46 @@ where } } - let user_channel_id: u128 = rand::thread_rng().gen::(); + let user_channel_id: u128 = 
thread_rng().gen::(); let allow_0conf = self.config.trusted_peers_0conf.contains(&counterparty_node_id); + let mut channel_override_config = None; + if let Some((lsp_node_id, _)) = self + .liquidity_source + .as_ref() + .and_then(|ls| ls.as_ref().get_lsps2_lsp_details()) + { + if lsp_node_id == counterparty_node_id { + // When we're an LSPS2 client, allow claiming underpaying HTLCs as the LSP will skim off some fee. We'll + // check that they don't take too much before claiming. + // + // We also set maximum allowed inbound HTLC value in flight + // to 100%. We should eventually be able to set this on a per-channel basis, but for + // now we just bump the default for all channels. + channel_override_config = Some(ChannelConfigOverrides { + handshake_overrides: Some(ChannelHandshakeConfigUpdate { + max_inbound_htlc_value_in_flight_percent_of_channel: Some(100), + ..Default::default() + }), + update_overrides: Some(ChannelConfigUpdate { + accept_underpaying_htlcs: Some(true), + ..Default::default() + }), + }); + } + } let res = if allow_0conf { self.channel_manager.accept_inbound_channel_from_trusted_peer_0conf( &temporary_channel_id, &counterparty_node_id, user_channel_id, + channel_override_config, ) } else { self.channel_manager.accept_inbound_channel( &temporary_channel_id, &counterparty_node_id, user_channel_id, + channel_override_config, ) }; @@ -1469,7 +1486,7 @@ where BumpTransactionEvent::HTLCResolution { .. } => {}, } - self.bump_tx_event_handler.handle_event(&bte); + self.bump_tx_event_handler.handle_event(&bte).await; }, LdkEvent::OnionMessageIntercepted { .. } => { debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); @@ -1477,6 +1494,15 @@ where LdkEvent::OnionMessagePeerConnected { .. } => { debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); }, + LdkEvent::PersistStaticInvoice { .. 
} => { + debug_assert!(false, "We currently don't support static invoice persistence, so this event should never be emitted."); + }, + LdkEvent::StaticInvoiceRequested { .. } => { + debug_assert!(false, "We currently don't support static invoice persistence, so this event should never be emitted."); + }, + LdkEvent::FundingTransactionReadyForSigning { .. } => { + debug_assert!(false, "We currently don't support interactive-tx, so this event should never be emitted."); + }, } Ok(()) } diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 984e4da8f..02d321787 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -15,26 +15,29 @@ pub use crate::config::{ EsploraSyncConfig, MaxDustHTLCExposure, }; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; -pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig, OnchainPaymentInfo, PaymentInfo}; +pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; pub use crate::logger::{LogLevel, LogRecord, LogWriter}; pub use crate::payment::store::{ ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, }; -pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; +pub use crate::payment::QrPaymentResult; pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; pub use lightning::ln::types::ChannelId; pub use lightning::offers::offer::OfferId; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; -pub use lightning::util::string::UntrustedString; +pub use lightning::routing::router::RouteParametersConfig; +pub use lightning_types::string::UntrustedString; pub use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; pub use lightning_invoice::{Description, SignedRawBolt11Invoice}; -pub use lightning_liquidity::lsps1::msgs::ChannelInfo as ChannelOrderInfo; -pub use lightning_liquidity::lsps1::msgs::{OrderId, OrderParameters, 
PaymentState}; +pub use lightning_liquidity::lsps0::ser::LSPSDateTime; +pub use lightning_liquidity::lsps1::msgs::{ + LSPS1ChannelInfo, LSPS1OrderId, LSPS1OrderParams, LSPS1PaymentState, +}; pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, Txid}; @@ -42,8 +45,6 @@ pub use bip39::Mnemonic; pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; -pub type DateTime = chrono::DateTime; - use crate::UniffiCustomTypeConverter; use crate::builder::sanitize_alias; @@ -125,9 +126,8 @@ impl From for OfferAmount { fn from(ldk_amount: LdkAmount) -> Self { match ldk_amount { LdkAmount::Bitcoin { amount_msats } => OfferAmount::Bitcoin { amount_msats }, - LdkAmount::Currency { iso4217_code, amount } => OfferAmount::Currency { - iso4217_code: iso4217_code.iter().map(|&b| b as char).collect(), - amount, + LdkAmount::Currency { iso4217_code, amount } => { + OfferAmount::Currency { iso4217_code: iso4217_code.as_str().to_owned(), amount } }, } } @@ -1066,13 +1066,71 @@ impl std::fmt::Display for Bolt11Invoice { } } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct LSPS1PaymentInfo { + /// A Lightning payment using BOLT 11. + pub bolt11: Option, + /// An onchain payment. + pub onchain: Option, +} + +#[cfg(feature = "uniffi")] +impl From for LSPS1PaymentInfo { + fn from(value: lightning_liquidity::lsps1::msgs::LSPS1PaymentInfo) -> Self { + LSPS1PaymentInfo { + bolt11: value.bolt11.map(|b| b.into()), + onchain: value.onchain.map(|o| o.into()), + } + } +} + +/// An onchain payment. +#[cfg(feature = "uniffi")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct LSPS1OnchainPaymentInfo { + /// Indicates the current state of the payment. + pub state: lightning_liquidity::lsps1::msgs::LSPS1PaymentState, + /// The datetime when the payment option expires. + pub expires_at: LSPSDateTime, + /// The total fee the LSP will charge to open this channel in satoshi. 
+ pub fee_total_sat: u64, + /// The amount the client needs to pay to have the requested channel openend. + pub order_total_sat: u64, + /// An on-chain address the client can send [`Self::order_total_sat`] to to have the channel + /// opened. + pub address: bitcoin::Address, + /// The minimum number of block confirmations that are required for the on-chain payment to be + /// considered confirmed. + pub min_onchain_payment_confirmations: Option, + /// The minimum fee rate for the on-chain payment in case the client wants the payment to be + /// confirmed without a confirmation. + pub min_fee_for_0conf: Arc, + /// The address where the LSP will send the funds if the order fails. + pub refund_onchain_address: Option, +} + +#[cfg(feature = "uniffi")] +impl From for LSPS1OnchainPaymentInfo { + fn from(value: lightning_liquidity::lsps1::msgs::LSPS1OnchainPaymentInfo) -> Self { + Self { + state: value.state, + expires_at: value.expires_at, + fee_total_sat: value.fee_total_sat, + order_total_sat: value.order_total_sat, + address: value.address, + min_onchain_payment_confirmations: value.min_onchain_payment_confirmations, + min_fee_for_0conf: Arc::new(value.min_fee_for_0conf), + refund_onchain_address: value.refund_onchain_address, + } + } +} /// A Lightning payment using BOLT 11. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Bolt11PaymentInfo { +pub struct LSPS1Bolt11PaymentInfo { /// Indicates the current state of the payment. - pub state: PaymentState, + pub state: LSPS1PaymentState, /// The datetime when the payment option expires. - pub expires_at: chrono::DateTime, + pub expires_at: LSPSDateTime, /// The total fee the LSP will charge to open this channel in satoshi. pub fee_total_sat: u64, /// The amount the client needs to pay to have the requested channel openend. 
@@ -1081,8 +1139,8 @@ pub struct Bolt11PaymentInfo { pub invoice: Arc, } -impl From for Bolt11PaymentInfo { - fn from(info: lightning_liquidity::lsps1::msgs::Bolt11PaymentInfo) -> Self { +impl From for LSPS1Bolt11PaymentInfo { + fn from(info: lightning_liquidity::lsps1::msgs::LSPS1Bolt11PaymentInfo) -> Self { Self { state: info.state, expires_at: info.expires_at, @@ -1093,7 +1151,7 @@ impl From for Bolt11Payment } } -impl UniffiCustomTypeConverter for OrderId { +impl UniffiCustomTypeConverter for LSPS1OrderId { type Builtin = String; fn into_custom(val: Self::Builtin) -> uniffi::Result { @@ -1105,11 +1163,11 @@ impl UniffiCustomTypeConverter for OrderId { } } -impl UniffiCustomTypeConverter for DateTime { +impl UniffiCustomTypeConverter for LSPSDateTime { type Builtin = String; fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(DateTime::from_str(&val).map_err(|_| Error::InvalidDateTime)?) + Ok(LSPSDateTime::from_str(&val).map_err(|_| Error::InvalidDateTime)?) } fn from_custom(obj: Self) -> Self::Builtin { diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs index 0486b8a4f..15e60bcc2 100644 --- a/src/io/sqlite_store/migrations.rs +++ b/src/io/sqlite_store/migrations.rs @@ -78,7 +78,7 @@ mod tests { use crate::io::sqlite_store::SqliteStore; use crate::io::test_utils::{do_read_write_remove_list_persist, random_storage_path}; - use lightning::util::persist::KVStore; + use lightning::util::persist::KVStoreSync; use rusqlite::{named_params, Connection}; diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index b72db5a2b..4006ab2cc 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -9,8 +9,9 @@ use crate::io::utils::check_namespace_key_validity; use lightning::io; -use lightning::util::persist::KVStore; -use lightning::util::string::PrintableString; +use lightning::util::persist::KVStoreSync; + +use lightning_types::string::PrintableString; use rusqlite::{named_params, Connection}; @@ -34,7 
+35,7 @@ pub const DEFAULT_KV_TABLE_NAME: &str = "ldk_data"; // The current SQLite `user_version`, which we can use if we'd ever need to do a schema migration. const SCHEMA_USER_VERSION: u16 = 2; -/// A [`KVStore`] implementation that writes to and reads from an [SQLite] database. +/// A [`KVStoreSync`] implementation that writes to and reads from an [SQLite] database. /// /// [SQLite]: https://sqlite.org pub struct SqliteStore { @@ -129,7 +130,7 @@ impl SqliteStore { } } -impl KVStore for SqliteStore { +impl KVStoreSync for SqliteStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { @@ -179,7 +180,7 @@ impl KVStore for SqliteStore { } fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index df806779e..244dd9cdc 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -9,7 +9,7 @@ use lightning::ln::functional_test_utils::{ connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, }; -use lightning::util::persist::{read_channel_monitors, KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN}; +use lightning::util::persist::{read_channel_monitors, KVStoreSync, KVSTORE_NAMESPACE_KEY_MAX_LEN}; use lightning::events::ClosureReason; use lightning::util::test_utils; @@ -29,23 +29,24 @@ pub(crate) fn random_storage_path() -> PathBuf { temp_path } -pub(crate) fn do_read_write_remove_list_persist(kv_store: &K) { - let data = [42u8; 32]; +pub(crate) fn do_read_write_remove_list_persist(kv_store: &K) { + let data = vec![42u8; 32]; let primary_namespace = "testspace"; let secondary_namespace = "testsubspace"; let key = "testkey"; // Test the 
basic KVStore operations. - kv_store.write(primary_namespace, secondary_namespace, key, &data).unwrap(); + kv_store.write(primary_namespace, secondary_namespace, key, data.clone()).unwrap(); // Test empty primary/secondary namespaces are allowed, but not empty primary namespace and non-empty // secondary primary_namespace, and not empty key. - kv_store.write("", "", key, &data).unwrap(); - let res = std::panic::catch_unwind(|| kv_store.write("", secondary_namespace, key, &data)); + kv_store.write("", "", key, data.clone()).unwrap(); + let res = + std::panic::catch_unwind(|| kv_store.write("", secondary_namespace, key, data.clone())); assert!(res.is_err()); let res = std::panic::catch_unwind(|| { - kv_store.write(primary_namespace, secondary_namespace, "", &data) + kv_store.write(primary_namespace, secondary_namespace, "", data.clone()) }); assert!(res.is_err()); @@ -63,7 +64,7 @@ pub(crate) fn do_read_write_remove_list_persist(kv_s // Ensure we have no issue operating with primary_namespace/secondary_namespace/key being KVSTORE_NAMESPACE_KEY_MAX_LEN let max_chars: String = std::iter::repeat('A').take(KVSTORE_NAMESPACE_KEY_MAX_LEN).collect(); - kv_store.write(&max_chars, &max_chars, &max_chars, &data).unwrap(); + kv_store.write(&max_chars, &max_chars, &max_chars, data.clone()).unwrap(); let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap(); assert_eq!(listed_keys.len(), 1); @@ -80,7 +81,7 @@ pub(crate) fn do_read_write_remove_list_persist(kv_s // Integration-test the given KVStore implementation. Test relaying a few payments and check that // the persisted data is updated the appropriate number of times. 
-pub(crate) fn do_test_store(store_0: &K, store_1: &K) { +pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let chanmon_cfgs = create_chanmon_cfgs(2); let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let chain_mon_0 = test_utils::TestChainMonitor::new( @@ -145,18 +146,19 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { // Force close because cooperative close doesn't result in any persisted // updates. + let message = "Channel force-closed".to_owned(); nodes[0] .node .force_close_broadcasting_latest_txn( &nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), - "whoops".to_string(), + message.clone(), ) .unwrap(); check_closed_event!( nodes[0], 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, [nodes[1].node.get_our_node_id()], 100000 ); diff --git a/src/io/utils.rs b/src/io/utils.rs index 06a1017ba..51e7be505 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -31,9 +31,10 @@ use lightning::util::persist::{ SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; -use lightning::util::string::PrintableString; use lightning::util::sweep::OutputSweeper; +use lightning_types::string::PrintableString; + use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; @@ -251,7 +252,7 @@ pub(crate) fn read_output_sweeper( kv_store, logger.clone(), ); - OutputSweeper::read(&mut reader, args).map_err(|e| { + OutputSweeper::read_with_kv_store_sync(&mut reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize OutputSweeper: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize OutputSweeper") }) @@ -286,7 +287,7 @@ where NODE_METRICS_PRIMARY_NAMESPACE, 
NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, - &data, + data, ) .map_err(|e| { log_error!( @@ -441,7 +442,7 @@ macro_rules! impl_read_write_change_set_type { L::Target: LdkLogger, { let data = ChangeSetSerWrapper(value).encode(); - kv_store.write($primary_namespace, $secondary_namespace, $key, &data).map_err(|e| { + kv_store.write($primary_namespace, $secondary_namespace, $key, data).map_err(|e| { log_error!( logger, "Writing data to key {}/{}/{} failed due to: {}", diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index e2cfc3c7b..87f966a9b 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -10,7 +10,7 @@ use crate::runtime::Runtime; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::io::{self, Error, ErrorKind}; -use lightning::util::persist::KVStore; +use lightning::util::persist::KVStoreSync; use prost::Message; use rand::RngCore; #[cfg(test)] @@ -38,7 +38,7 @@ type CustomRetryPolicy = FilteredRetryPolicy< Box bool + 'static + Send + Sync>, >; -/// A [`KVStore`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend. +/// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend. 
pub struct VssStore { client: VssClient, store_id: String, @@ -127,7 +127,7 @@ impl VssStore { } } -impl KVStore for VssStore { +impl KVStoreSync for VssStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { @@ -160,11 +160,11 @@ impl KVStore for VssStore { } fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; let version = -1; - let storable = self.storable_builder.build(buf.to_vec(), version); + let storable = self.storable_builder.build(buf, version); let request = PutObjectRequest { store_id: self.store_id.clone(), global_version: None, diff --git a/src/lib.rs b/src/lib.rs index 9035d5361..160762dd2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -158,7 +158,7 @@ use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; -use lightning_background_processor::process_events_async; +use lightning_background_processor::process_events_async_with_kv_store_sync; use bitcoin::secp256k1::PublicKey; @@ -521,6 +521,9 @@ impl Node { let background_chan_man = Arc::clone(&self.channel_manager); let background_gossip_sync = self.gossip_source.as_gossip_sync(); let background_peer_man = Arc::clone(&self.peer_manager); + let background_liquidity_man_opt = + self.liquidity_source.as_ref().map(|ls| ls.liquidity_manager()); + let background_sweeper = Arc::clone(&self.output_sweeper); let background_onion_messenger = Arc::clone(&self.onion_messenger); let background_logger = Arc::clone(&self.logger); let background_error_logger = Arc::clone(&self.logger); @@ -547,7 +550,7 @@ impl Node { }; self.runtime.spawn_background_processor_task(async move { - process_events_async( + process_events_async_with_kv_store_sync( background_persister, 
|e| background_event_handler.handle_event(e), background_chain_mon, @@ -555,6 +558,8 @@ impl Node { Some(background_onion_messenger), background_gossip_sync, background_peer_man, + background_liquidity_man_opt, + Some(background_sweeper), background_logger, Some(background_scorer), sleeper, @@ -1193,12 +1198,17 @@ impl Node { self.runtime.block_on(async move { if chain_source.is_transaction_based() { chain_source.update_fee_rate_estimates().await?; - chain_source.sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper).await?; + chain_source + .sync_lightning_wallet(sync_cman, sync_cmon, Arc::clone(&sync_sweeper)) + .await?; chain_source.sync_onchain_wallet().await?; } else { chain_source.update_fee_rate_estimates().await?; - chain_source.poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper).await?; + chain_source + .poll_and_update_listeners(sync_cman, sync_cmon, Arc::clone(&sync_sweeper)) + .await?; } + let _ = sync_sweeper.regenerate_and_broadcast_spend_if_necessary().await; Ok(()) }) } @@ -1247,35 +1257,16 @@ impl Node { open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) { if force { - if self.config.anchor_channels_config.as_ref().map_or(false, |acc| { - acc.trusted_peers_no_reserve.contains(&counterparty_node_id) - }) { - self.channel_manager - .force_close_without_broadcasting_txn( - &channel_details.channel_id, - &counterparty_node_id, - force_close_reason.unwrap_or_default(), - ) - .map_err(|e| { - log_error!( - self.logger, - "Failed to force-close channel to trusted peer: {:?}", - e - ); - Error::ChannelClosingFailed - })?; - } else { - self.channel_manager - .force_close_broadcasting_latest_txn( - &channel_details.channel_id, - &counterparty_node_id, - force_close_reason.unwrap_or_default(), - ) - .map_err(|e| { - log_error!(self.logger, "Failed to force-close channel: {:?}", e); - Error::ChannelClosingFailed - })?; - } + self.channel_manager + .force_close_broadcasting_latest_txn( + &channel_details.channel_id, + 
&counterparty_node_id, + force_close_reason.unwrap_or_default(), + ) + .map_err(|e| { + log_error!(self.logger, "Failed to force-close channel: {:?}", e); + Error::ChannelClosingFailed + })?; } else { self.channel_manager .close_channel(&channel_details.channel_id, &counterparty_node_id) @@ -1340,12 +1331,10 @@ impl Node { let mut total_lightning_balance_sats = 0; let mut lightning_balances = Vec::new(); - for (funding_txo, channel_id) in self.chain_monitor.list_monitors() { - match self.chain_monitor.get_monitor(funding_txo) { + for channel_id in self.chain_monitor.list_monitors() { + match self.chain_monitor.get_monitor(channel_id) { Ok(monitor) => { - // unwrap safety: `get_counterparty_node_id` will always be `Some` after 0.0.110 and - // LDK Node 0.1 depended on 0.0.115 already. - let counterparty_node_id = monitor.get_counterparty_node_id().unwrap(); + let counterparty_node_id = monitor.get_counterparty_node_id(); for ldk_balance in monitor.get_claimable_balances() { total_lightning_balance_sats += ldk_balance.claimable_amount_satoshis(); lightning_balances.push(LightningBalance::from_ldk_balance( diff --git a/src/liquidity.rs b/src/liquidity.rs index 6ee8066c1..5d0bf5afe 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -14,7 +14,7 @@ use crate::runtime::Runtime; use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager, Wallet}; use crate::{total_anchor_channels_reserve_sats, Config, Error}; -use lightning::events::HTLCDestination; +use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{InterceptId, MIN_FINAL_CLTV_EXPIRY_DELTA}; use lightning::ln::msgs::SocketAddress; use lightning::ln::types::ChannelId; @@ -22,14 +22,16 @@ use lightning::routing::router::{RouteHint, RouteHintHop}; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, InvoiceBuilder, RoutingFees}; -use lightning_liquidity::events::Event; -use lightning_liquidity::lsps0::ser::RequestId; +use 
lightning_liquidity::events::LiquidityEvent; +use lightning_liquidity::lsps0::ser::{LSPSDateTime, LSPSRequestId}; use lightning_liquidity::lsps1::client::LSPS1ClientConfig as LdkLSPS1ClientConfig; use lightning_liquidity::lsps1::event::LSPS1ClientEvent; -use lightning_liquidity::lsps1::msgs::{ChannelInfo, LSPS1Options, OrderId, OrderParameters}; +use lightning_liquidity::lsps1::msgs::{ + LSPS1ChannelInfo, LSPS1Options, LSPS1OrderId, LSPS1OrderParams, +}; use lightning_liquidity::lsps2::client::LSPS2ClientConfig as LdkLSPS2ClientConfig; use lightning_liquidity::lsps2::event::{LSPS2ClientEvent, LSPS2ServiceEvent}; -use lightning_liquidity::lsps2::msgs::{OpeningFeeParams, RawOpeningFeeParams}; +use lightning_liquidity::lsps2::msgs::{LSPS2OpeningFeeParams, LSPS2RawOpeningFeeParams}; use lightning_liquidity::lsps2::service::LSPS2ServiceConfig as LdkLSPS2ServiceConfig; use lightning_liquidity::lsps2::utils::compute_opening_fee; use lightning_liquidity::{LiquidityClientConfig, LiquidityServiceConfig}; @@ -41,7 +43,7 @@ use bitcoin::secp256k1::{PublicKey, Secp256k1}; use tokio::sync::oneshot; -use chrono::{DateTime, Utc}; +use chrono::Utc; use rand::Rng; @@ -62,10 +64,10 @@ struct LSPS1Client { token: Option, ldk_client_config: LdkLSPS1ClientConfig, pending_opening_params_requests: - Mutex>>, - pending_create_order_requests: Mutex>>, + Mutex>>, + pending_create_order_requests: Mutex>>, pending_check_order_status_requests: - Mutex>>, + Mutex>>, } #[derive(Debug, Clone)] @@ -80,8 +82,8 @@ struct LSPS2Client { lsp_address: SocketAddress, token: Option, ldk_client_config: LdkLSPS2ClientConfig, - pending_fee_requests: Mutex>>, - pending_buy_requests: Mutex>>, + pending_fee_requests: Mutex>>, + pending_buy_requests: Mutex>>, } #[derive(Debug, Clone)] @@ -221,16 +223,22 @@ where pub(crate) fn build(self) -> LiquiditySource { let liquidity_service_config = self.lsps2_service.as_ref().map(|s| { let lsps2_service_config = Some(s.ldk_service_config.clone()); + let 
lsps5_service_config = None; let advertise_service = s.service_config.advertise_service; - LiquidityServiceConfig { lsps2_service_config, advertise_service } + LiquidityServiceConfig { lsps2_service_config, lsps5_service_config, advertise_service } }); let lsps1_client_config = self.lsps1_client.as_ref().map(|s| s.ldk_client_config.clone()); let lsps2_client_config = self.lsps2_client.as_ref().map(|s| s.ldk_client_config.clone()); - let liquidity_client_config = - Some(LiquidityClientConfig { lsps1_client_config, lsps2_client_config }); + let lsps5_client_config = None; + let liquidity_client_config = Some(LiquidityClientConfig { + lsps1_client_config, + lsps2_client_config, + lsps5_client_config, + }); let liquidity_manager = Arc::new(LiquidityManager::new( + Arc::clone(&self.keys_manager), Arc::clone(&self.keys_manager), Arc::clone(&self.channel_manager), Some(Arc::clone(&self.chain_source)), @@ -275,13 +283,11 @@ where L::Target: LdkLogger, { pub(crate) fn set_peer_manager(&self, peer_manager: Arc) { - *self.peer_manager.write().unwrap() = Some(Arc::clone(&peer_manager)); - let process_msgs_callback = move || peer_manager.process_events(); - self.liquidity_manager.set_process_msgs_callback(process_msgs_callback); + *self.peer_manager.write().unwrap() = Some(peer_manager); } - pub(crate) fn liquidity_manager(&self) -> &LiquidityManager { - self.liquidity_manager.as_ref() + pub(crate) fn liquidity_manager(&self) -> Arc { + Arc::clone(&self.liquidity_manager) } pub(crate) fn get_lsps1_lsp_details(&self) -> Option<(PublicKey, SocketAddress)> { @@ -294,7 +300,7 @@ where pub(crate) async fn handle_next_event(&self) { match self.liquidity_manager.next_event_async().await { - Event::LSPS1Client(LSPS1ClientEvent::SupportedOptionsReady { + LiquidityEvent::LSPS1Client(LSPS1ClientEvent::SupportedOptionsReady { request_id, counterparty_node_id, supported_options, @@ -347,7 +353,7 @@ where ); } }, - Event::LSPS1Client(LSPS1ClientEvent::OrderCreated { + 
LiquidityEvent::LSPS1Client(LSPS1ClientEvent::OrderCreated { request_id, counterparty_node_id, order_id, @@ -405,7 +411,7 @@ where log_error!(self.logger, "Received unexpected LSPS1Client::OrderCreated event!"); } }, - Event::LSPS1Client(LSPS1ClientEvent::OrderStatus { + LiquidityEvent::LSPS1Client(LSPS1ClientEvent::OrderStatus { request_id, counterparty_node_id, order_id, @@ -463,7 +469,7 @@ where log_error!(self.logger, "Received unexpected LSPS1Client::OrderStatus event!"); } }, - Event::LSPS2Service(LSPS2ServiceEvent::GetInfo { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::GetInfo { request_id, counterparty_node_id, token, @@ -484,7 +490,7 @@ where if token != Some(required) { log_error!( self.logger, - "Rejecting LSPS2 request {:?} from counterparty {} as the client provided an invalid token.", + "Rejecting LSPS2 request {:?} from counterparty {} as the client provided an invalid token.", request_id, counterparty_node_id ); @@ -502,10 +508,8 @@ where } } - let mut valid_until: DateTime = Utc::now(); - valid_until += LSPS2_GETINFO_REQUEST_EXPIRY; - - let opening_fee_params = RawOpeningFeeParams { + let valid_until = LSPSDateTime(Utc::now() + LSPS2_GETINFO_REQUEST_EXPIRY); + let opening_fee_params = LSPS2RawOpeningFeeParams { min_fee_msat: service_config.min_channel_opening_fee_msat, proportional: service_config.channel_opening_fee_ppm, valid_until, @@ -533,7 +537,7 @@ where return; } }, - Event::LSPS2Service(LSPS2ServiceEvent::BuyRequest { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::BuyRequest { request_id, counterparty_node_id, opening_fee_params: _, @@ -600,7 +604,7 @@ where return; } }, - Event::LSPS2Service(LSPS2ServiceEvent::OpenChannel { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::OpenChannel { their_network_key, amt_to_forward_msat, opening_fee_msat: _, @@ -674,7 +678,7 @@ where return; } - let mut config = *self.channel_manager.get_current_default_configuration(); + let mut config = self.channel_manager.get_current_config().clone(); // 
We set these LSP-specific values during Node building, here we're making sure it's actually set. debug_assert_eq!( @@ -714,7 +718,7 @@ where }, } }, - Event::LSPS2Client(LSPS2ClientEvent::OpeningParametersReady { + LiquidityEvent::LSPS2Client(LSPS2ClientEvent::OpeningParametersReady { request_id, counterparty_node_id, opening_fee_params_menu, @@ -764,7 +768,7 @@ where ); } }, - Event::LSPS2Client(LSPS2ClientEvent::InvoiceParametersReady { + LiquidityEvent::LSPS2Client(LSPS2ClientEvent::InvoiceParametersReady { request_id, counterparty_node_id, intercept_scid, @@ -904,7 +908,7 @@ where return Err(Error::LiquidityRequestFailed); } - let order_params = OrderParameters { + let order_params = LSPS1OrderParams { lsp_balance_sat, client_balance_sat, required_channel_confirmations: lsp_limits.min_required_channel_confirmations, @@ -953,7 +957,7 @@ where } pub(crate) async fn lsps1_check_order_status( - &self, order_id: OrderId, + &self, order_id: LSPS1OrderId, ) -> Result { let lsps1_client = self.lsps1_client.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; let client_handler = self.liquidity_manager.lsps1_client_handler().ok_or_else(|| { @@ -1127,7 +1131,7 @@ where } async fn lsps2_send_buy_request( - &self, amount_msat: Option, opening_fee_params: OpeningFeeParams, + &self, amount_msat: Option, opening_fee_params: LSPS2OpeningFeeParams, ) -> Result { let lsps2_client = self.lsps2_client.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; @@ -1280,9 +1284,9 @@ where } } - pub(crate) fn handle_htlc_handling_failed(&self, failed_next_destination: HTLCDestination) { + pub(crate) fn handle_htlc_handling_failed(&self, failure_type: HTLCHandlingFailureType) { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.htlc_handling_failed(failed_next_destination) { + if let Err(e) = lsps2_service_handler.htlc_handling_failed(failure_type) { log_error!( self.logger, "LSPS2 service failed to handle 
HTLCHandlingFailed event: {:?}", @@ -1316,82 +1320,24 @@ pub(crate) struct LSPS1OpeningParamsResponse { #[derive(Debug, Clone)] pub struct LSPS1OrderStatus { /// The id of the channel order. - pub order_id: OrderId, + pub order_id: LSPS1OrderId, /// The parameters of channel order. - pub order_params: OrderParameters, + pub order_params: LSPS1OrderParams, /// Contains details about how to pay for the order. - pub payment_options: PaymentInfo, + pub payment_options: LSPS1PaymentInfo, /// Contains information about the channel state. - pub channel_state: Option, + pub channel_state: Option, } #[cfg(not(feature = "uniffi"))] -type PaymentInfo = lightning_liquidity::lsps1::msgs::PaymentInfo; - -/// Details regarding how to pay for an order. -#[cfg(feature = "uniffi")] -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PaymentInfo { - /// A Lightning payment using BOLT 11. - pub bolt11: Option, - /// An onchain payment. - pub onchain: Option, -} - -#[cfg(feature = "uniffi")] -impl From for PaymentInfo { - fn from(value: lightning_liquidity::lsps1::msgs::PaymentInfo) -> Self { - PaymentInfo { - bolt11: value.bolt11.map(|b| b.into()), - onchain: value.onchain.map(|o| o.into()), - } - } -} - -/// An onchain payment. -#[cfg(feature = "uniffi")] -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct OnchainPaymentInfo { - /// Indicates the current state of the payment. - pub state: lightning_liquidity::lsps1::msgs::PaymentState, - /// The datetime when the payment option expires. - pub expires_at: chrono::DateTime, - /// The total fee the LSP will charge to open this channel in satoshi. - pub fee_total_sat: u64, - /// The amount the client needs to pay to have the requested channel openend. - pub order_total_sat: u64, - /// An on-chain address the client can send [`Self::order_total_sat`] to to have the channel - /// opened. 
- pub address: bitcoin::Address, - /// The minimum number of block confirmations that are required for the on-chain payment to be - /// considered confirmed. - pub min_onchain_payment_confirmations: Option, - /// The minimum fee rate for the on-chain payment in case the client wants the payment to be - /// confirmed without a confirmation. - pub min_fee_for_0conf: Arc, - /// The address where the LSP will send the funds if the order fails. - pub refund_onchain_address: Option, -} +type LSPS1PaymentInfo = lightning_liquidity::lsps1::msgs::LSPS1PaymentInfo; #[cfg(feature = "uniffi")] -impl From for OnchainPaymentInfo { - fn from(value: lightning_liquidity::lsps1::msgs::OnchainPaymentInfo) -> Self { - Self { - state: value.state, - expires_at: value.expires_at, - fee_total_sat: value.fee_total_sat, - order_total_sat: value.order_total_sat, - address: value.address, - min_onchain_payment_confirmations: value.min_onchain_payment_confirmations, - min_fee_for_0conf: Arc::new(value.min_fee_for_0conf), - refund_onchain_address: value.refund_onchain_address, - } - } -} +type LSPS1PaymentInfo = crate::ffi::LSPS1PaymentInfo; #[derive(Debug, Clone)] pub(crate) struct LSPS2FeeResponse { - opening_fee_params_menu: Vec, + opening_fee_params_menu: Vec, } #[derive(Debug, Clone)] @@ -1474,7 +1420,7 @@ impl LSPS1Liquidity { } /// Connects to the configured LSP and checks for the status of a previously-placed order. 
- pub fn check_order_status(&self, order_id: OrderId) -> Result { + pub fn check_order_status(&self, order_id: LSPS1OrderId) -> Result { let liquidity_source = self.liquidity_source.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; diff --git a/src/message_handler.rs b/src/message_handler.rs index cebd1ea07..25995a481 100644 --- a/src/message_handler.rs +++ b/src/message_handler.rs @@ -10,6 +10,7 @@ use crate::liquidity::LiquiditySource; use lightning::ln::peer_handler::CustomMessageHandler; use lightning::ln::wire::CustomMessageReader; use lightning::util::logger::Logger; +use lightning::util::ser::LengthLimitedRead; use lightning_types::features::{InitFeatures, NodeFeatures}; @@ -47,7 +48,7 @@ where { type CustomMessage = RawLSPSMessage; - fn read( + fn read( &self, message_type: u16, buffer: &mut RD, ) -> Result, lightning::ln::msgs::DecodeError> { match self { diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 92d7fc948..7dcb2817c 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -20,16 +20,14 @@ use crate::payment::store::{ LSPFeeLimits, PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, }; -use crate::payment::SendingParameters; use crate::peer_store::{PeerInfo, PeerStore}; use crate::runtime::Runtime; use crate::types::{ChannelManager, PaymentStore}; -use lightning::ln::bolt11_payment; use lightning::ln::channelmanager::{ - Bolt11InvoiceParameters, PaymentId, RecipientOnionFields, Retry, RetryableSendFailure, + Bolt11InvoiceParameters, Bolt11PaymentError, PaymentId, Retry, RetryableSendFailure, }; -use lightning::routing::router::{PaymentParameters, RouteParameters}; +use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; @@ -92,22 +90,17 @@ impl Bolt11Payment { /// Send a payment given an invoice. 
/// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. pub fn send( - &self, invoice: &Bolt11Invoice, sending_parameters: Option, + &self, invoice: &Bolt11Invoice, route_parameters: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } let invoice = maybe_deref(invoice); - - let (payment_hash, recipient_onion, mut route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { - log_error!(self.logger, "Failed to send payment due to the given invoice being \"zero-amount\". Please use send_using_amount instead."); - Error::InvalidInvoice - })?; - + let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); let payment_id = PaymentId(invoice.payment_hash().to_byte_array()); if let Some(payment) = self.payment_store.get(&payment_id) { if payment.status == PaymentStatus::Pending @@ -118,29 +111,16 @@ impl Bolt11Payment { } } - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; - - let payment_secret = Some(*invoice.payment_secret()); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); 
let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); + let payment_secret = Some(*invoice.payment_secret()); - match self.channel_manager.send_payment( - payment_hash, - recipient_onion, + match self.channel_manager.pay_for_bolt11_invoice( + invoice, payment_id, - route_params, + None, + route_parameters, retry_strategy, ) { Ok(()) => { @@ -166,7 +146,13 @@ impl Bolt11Payment { Ok(payment_id) }, - Err(e) => { + Err(Bolt11PaymentError::InvalidAmount) => { + log_error!(self.logger, + "Failed to send payment due to the given invoice being \"zero-amount\". Please use send_using_amount instead." + ); + return Err(Error::InvalidInvoice); + }, + Err(Bolt11PaymentError::SendingFailed(e)) => { log_error!(self.logger, "Failed to send payment: {:?}", e); match e { RetryableSendFailure::DuplicatePayment => Err(Error::DuplicatePayment), @@ -200,18 +186,17 @@ impl Bolt11Payment { /// This can be used to pay a so-called "zero-amount" invoice, i.e., an invoice that leaves the /// amount paid to be determined by the user. /// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
pub fn send_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, - sending_parameters: Option, + route_parameters: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } let invoice = maybe_deref(invoice); - if let Some(invoice_amount_msat) = invoice.amount_milli_satoshis() { if amount_msat < invoice_amount_msat { log_error!( @@ -232,46 +217,16 @@ impl Bolt11Payment { } } - let payment_secret = invoice.payment_secret(); - let expiry_time = invoice.duration_since_epoch().saturating_add(invoice.expiry_time()); - let mut payment_params = PaymentParameters::from_node_id( - invoice.recover_payee_pub_key(), - invoice.min_final_cltv_expiry_delta() as u32, - ) - .with_expiry_time(expiry_time.as_secs()) - .with_route_hints(invoice.route_hints()) - .map_err(|_| Error::InvalidInvoice)?; - if let Some(features) = invoice.features() { - payment_params = payment_params - .with_bolt11_features(features.clone()) - .map_err(|_| Error::InvalidInvoice)?; - } - let mut route_params = - RouteParameters::from_payment_params_and_value(payment_params, amount_msat); - - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; - + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let recipient_fields = RecipientOnionFields::secret_only(*payment_secret); + let payment_secret = 
Some(*invoice.payment_secret()); - match self.channel_manager.send_payment( - payment_hash, - recipient_fields, + match self.channel_manager.pay_for_bolt11_invoice( + invoice, payment_id, - route_params, + Some(amount_msat), + route_parameters, retry_strategy, ) { Ok(()) => { @@ -286,7 +241,7 @@ impl Bolt11Payment { let kind = PaymentKind::Bolt11 { hash: payment_hash, preimage: None, - secret: Some(*payment_secret), + secret: payment_secret, }; let payment = PaymentDetails::new( @@ -301,16 +256,22 @@ impl Bolt11Payment { Ok(payment_id) }, - Err(e) => { + Err(Bolt11PaymentError::InvalidAmount) => { + log_error!( + self.logger, + "Failed to send payment due to amount given being insufficient." + ); + return Err(Error::InvalidInvoice); + }, + Err(Bolt11PaymentError::SendingFailed(e)) => { log_error!(self.logger, "Failed to send payment: {:?}", e); - match e { RetryableSendFailure::DuplicatePayment => Err(Error::DuplicatePayment), _ => { let kind = PaymentKind::Bolt11 { hash: payment_hash, preimage: None, - secret: Some(*payment_secret), + secret: payment_secret, }; let payment = PaymentDetails::new( payment_id, @@ -320,8 +281,8 @@ impl Bolt11Payment { PaymentDirection::Outbound, PaymentStatus::Failed, ); - self.payment_store.insert(payment)?; + self.payment_store.insert(payment)?; Err(Error::PaymentSendingFailed) }, } @@ -798,18 +759,41 @@ impl Bolt11Payment { /// payment. To mitigate this issue, channels with available liquidity less than the required /// amount times [`Config::probing_liquidity_limit_multiplier`] won't be used to send /// pre-flight probes. - pub fn send_probes(&self, invoice: &Bolt11Invoice) -> Result<(), Error> { + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
+ pub fn send_probes( + &self, invoice: &Bolt11Invoice, route_parameters: Option, + ) -> Result<(), Error> { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } let invoice = maybe_deref(invoice); + let payment_params = PaymentParameters::from_bolt11_invoice(invoice); - let (_payment_hash, _recipient_onion, route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + let amount_msat = invoice.amount_milli_satoshis().ok_or_else(|| { log_error!(self.logger, "Failed to send probes due to the given invoice being \"zero-amount\". Please use send_probes_using_amount instead."); Error::InvalidInvoice })?; + let mut route_params = + RouteParameters::from_payment_params_and_value(payment_params, amount_msat); + + if let Some(RouteParametersConfig { + max_total_routing_fee_msat, + max_total_cltv_expiry_delta, + max_path_count, + max_channel_saturation_power_of_half, + }) = route_parameters.as_ref().or(self.config.route_parameters.as_ref()) + { + route_params.max_total_routing_fee_msat = *max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = *max_total_cltv_expiry_delta; + route_params.payment_params.max_path_count = *max_path_count; + route_params.payment_params.max_channel_saturation_power_of_half = + *max_channel_saturation_power_of_half; + } + let liquidity_limit_multiplier = Some(self.config.probing_liquidity_limit_multiplier); self.channel_manager @@ -828,36 +812,49 @@ impl Bolt11Payment { /// This can be used to send pre-flight probes for a so-called "zero-amount" invoice, i.e., an /// invoice that leaves the amount paid to be determined by the user. /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. + /// /// See [`Self::send_probes`] for more information. 
pub fn send_probes_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, + route_parameters: Option, ) -> Result<(), Error> { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } let invoice = maybe_deref(invoice); + let payment_params = PaymentParameters::from_bolt11_invoice(invoice); - let (_payment_hash, _recipient_onion, route_params) = if let Some(invoice_amount_msat) = - invoice.amount_milli_satoshis() - { + if let Some(invoice_amount_msat) = invoice.amount_milli_satoshis() { if amount_msat < invoice_amount_msat { log_error!( self.logger, - "Failed to send probes as the given amount needs to be at least the invoice amount: required {}msat, gave {}msat.", invoice_amount_msat, amount_msat); + "Failed to send probes as the given amount needs to be at least the invoice amount: required {}msat, gave {}msat.", + invoice_amount_msat, + amount_msat + ); return Err(Error::InvalidAmount); } + } - bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { - log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being \"zero-amount\"."); - Error::InvalidInvoice - })? - } else { - bolt11_payment::payment_parameters_from_variable_amount_invoice(&invoice, amount_msat).map_err(|_| { - log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being not \"zero-amount\"."); - Error::InvalidInvoice - })? 
- }; + let mut route_params = + RouteParameters::from_payment_params_and_value(payment_params, amount_msat); + + if let Some(RouteParametersConfig { + max_total_routing_fee_msat, + max_total_cltv_expiry_delta, + max_path_count, + max_channel_saturation_power_of_half, + }) = route_parameters.as_ref().or(self.config.route_parameters.as_ref()) + { + route_params.max_total_routing_fee_msat = *max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = *max_total_cltv_expiry_delta; + route_params.payment_params.max_path_count = *max_path_count; + route_params.payment_params.max_channel_saturation_power_of_half = + *max_channel_saturation_power_of_half; + } let liquidity_limit_multiplier = Some(self.config.probing_liquidity_limit_multiplier); diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 8e10b9f4f..4e968deb7 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -19,7 +19,9 @@ use crate::types::{ChannelManager, PaymentStore}; use lightning::ln::channelmanager::{PaymentId, Retry}; use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; -use lightning::util::string::UntrustedString; +use lightning::routing::router::RouteParametersConfig; + +use lightning_types::string::UntrustedString; use rand::RngCore; @@ -82,7 +84,7 @@ impl Bolt12Payment { rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let max_total_routing_fee_msat = None; + let route_params_config = RouteParametersConfig::default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -103,7 +105,7 @@ impl Bolt12Payment { payer_note.clone(), payment_id, retry_strategy, - max_total_routing_fee_msat, + route_params_config, ) { Ok(()) => { let payee_pubkey = offer.issuer_signing_pubkey(); @@ -185,7 +187,7 @@ impl Bolt12Payment { 
rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let max_total_routing_fee_msat = None; + let route_params_config = RouteParametersConfig::default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -210,7 +212,7 @@ impl Bolt12Payment { payer_note.clone(), payment_id, retry_strategy, - max_total_routing_fee_msat, + route_params_config, ) { Ok(()) => { let payee_pubkey = offer.issuer_signing_pubkey(); @@ -273,17 +275,17 @@ impl Bolt12Payment { pub(crate) fn receive_inner( &self, amount_msat: u64, description: &str, expiry_secs: Option, quantity: Option, ) -> Result { - let absolute_expiry = expiry_secs.map(|secs| { - (SystemTime::now() + Duration::from_secs(secs as u64)) - .duration_since(UNIX_EPOCH) - .unwrap() - }); + let mut offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { + log_error!(self.logger, "Failed to create offer builder: {:?}", e); + Error::OfferCreationFailed + })?; - let offer_builder = - self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; + if let Some(expiry_secs) = expiry_secs { + let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) + .duration_since(UNIX_EPOCH) + .unwrap(); + offer_builder = offer_builder.absolute_expiry(absolute_expiry); + } let mut offer = offer_builder.amount_msats(amount_msat).description(description.to_string()); @@ -319,17 +321,18 @@ impl Bolt12Payment { pub fn receive_variable_amount( &self, description: &str, expiry_secs: Option, ) -> Result { - let absolute_expiry = expiry_secs.map(|secs| { - (SystemTime::now() + Duration::from_secs(secs as u64)) + let mut offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { + log_error!(self.logger, "Failed to create offer 
builder: {:?}", e); + Error::OfferCreationFailed + })?; + + if let Some(expiry_secs) = expiry_secs { + let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap() - }); + .unwrap(); + offer_builder = offer_builder.absolute_expiry(absolute_expiry); + } - let offer_builder = - self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; let offer = offer_builder.description(description.to_string()).build().map_err(|e| { log_error!(self.logger, "Failed to create offer: {:?}", e); Error::OfferCreationFailed @@ -396,7 +399,7 @@ impl Bolt12Payment { .duration_since(UNIX_EPOCH) .unwrap(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let max_total_routing_fee_msat = None; + let route_params_config = RouteParametersConfig::default(); let mut refund_builder = self .channel_manager @@ -405,7 +408,7 @@ impl Bolt12Payment { absolute_expiry, payment_id, retry_strategy, - max_total_routing_fee_msat, + route_params_config, ) .map_err(|e| { log_error!(self.logger, "Failed to create refund builder: {:?}", e); diff --git a/src/payment/mod.rs b/src/payment/mod.rs index b031e37fd..54f7894dc 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -22,87 +22,3 @@ pub use store::{ ConfirmationStatus, LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, }; pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; - -/// Represents information used to send a payment. -#[derive(Clone, Debug, PartialEq)] -pub struct SendingParameters { - /// The maximum total fees, in millisatoshi, that may accrue during route finding. - /// - /// This limit also applies to the total fees that may arise while retrying failed payment - /// paths. - /// - /// Note that values below a few sats may result in some paths being spuriously ignored. 
- #[cfg(not(feature = "uniffi"))] - pub max_total_routing_fee_msat: Option>, - /// The maximum total fees, in millisatoshi, that may accrue during route finding. - /// - /// This limit also applies to the total fees that may arise while retrying failed payment - /// paths. - /// - /// Note that values below a few sats may result in some paths being spuriously ignored. - #[cfg(feature = "uniffi")] - pub max_total_routing_fee_msat: Option, - /// The maximum total CLTV delta we accept for the route. - /// - /// Defaults to [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]. - /// - /// [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]: lightning::routing::router::DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA - pub max_total_cltv_expiry_delta: Option, - /// The maximum number of paths that may be used by (MPP) payments. - /// - /// Defaults to [`DEFAULT_MAX_PATH_COUNT`]. - /// - /// [`DEFAULT_MAX_PATH_COUNT`]: lightning::routing::router::DEFAULT_MAX_PATH_COUNT - pub max_path_count: Option, - /// Selects the maximum share of a channel's total capacity which will be sent over a channel, - /// as a power of 1/2. - /// - /// A higher value prefers to send the payment using more MPP parts whereas - /// a lower value prefers to send larger MPP parts, potentially saturating channels and - /// increasing failure probability for those paths. - /// - /// Note that this restriction will be relaxed during pathfinding after paths which meet this - /// restriction have been found. While paths which meet this criteria will be searched for, it - /// is ultimately up to the scorer to select them over other paths. 
- /// - /// Examples: - /// - /// | Value | Max Proportion of Channel Capacity Used | - /// |-------|-----------------------------------------| - /// | 0 | Up to 100% of the channel’s capacity | - /// | 1 | Up to 50% of the channel’s capacity | - /// | 2 | Up to 25% of the channel’s capacity | - /// | 3 | Up to 12.5% of the channel’s capacity | - /// - /// Default value: 2 - pub max_channel_saturation_power_of_half: Option, -} - -/// Represents the possible states of [`SendingParameters::max_total_routing_fee_msat`]. -// -// Required only in bindings as UniFFI can't expose `Option>`. -#[cfg(feature = "uniffi")] -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum MaxTotalRoutingFeeLimit { - None, - Some { amount_msat: u64 }, -} - -#[cfg(feature = "uniffi")] -impl From for Option { - fn from(value: MaxTotalRoutingFeeLimit) -> Self { - match value { - MaxTotalRoutingFeeLimit::Some { amount_msat } => Some(amount_msat), - MaxTotalRoutingFeeLimit::None => None, - } - } -} - -#[cfg(feature = "uniffi")] -impl From> for MaxTotalRoutingFeeLimit { - fn from(value: Option) -> Self { - value.map_or(MaxTotalRoutingFeeLimit::None, |amount_msat| MaxTotalRoutingFeeLimit::Some { - amount_msat, - }) - } -} diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 3e48fd090..181307a0f 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -11,11 +11,10 @@ use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; -use crate::payment::SendingParameters; use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore}; use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; -use lightning::routing::router::{PaymentParameters, RouteParameters}; +use lightning::routing::router::{PaymentParameters, RouteParameters, 
RouteParametersConfig}; use lightning::sign::EntropySource; use lightning_types::payment::{PaymentHash, PaymentPreimage}; @@ -52,41 +51,43 @@ impl SpontaneousPayment { /// Send a spontaneous aka. "keysend", payment. /// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. pub fn send( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, + &self, amount_msat: u64, node_id: PublicKey, + route_parameters: Option, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, None, None) + self.send_inner(amount_msat, node_id, route_parameters, None, None) } /// Send a spontaneous payment including a list of custom TLVs. pub fn send_with_custom_tlvs( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, - custom_tlvs: Vec, + &self, amount_msat: u64, node_id: PublicKey, + route_parameters: Option, custom_tlvs: Vec, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, Some(custom_tlvs), None) + self.send_inner(amount_msat, node_id, route_parameters, Some(custom_tlvs), None) } /// Send a spontaneous payment with custom preimage pub fn send_with_preimage( &self, amount_msat: u64, node_id: PublicKey, preimage: PaymentPreimage, - sending_parameters: Option, + route_parameters: Option, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, None, Some(preimage)) + self.send_inner(amount_msat, node_id, route_parameters, None, Some(preimage)) } /// Send a spontaneous payment with custom preimage including a list of custom TLVs. 
pub fn send_with_preimage_and_custom_tlvs( &self, amount_msat: u64, node_id: PublicKey, custom_tlvs: Vec, - preimage: PaymentPreimage, sending_parameters: Option, + preimage: PaymentPreimage, route_parameters: Option, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, Some(custom_tlvs), Some(preimage)) + self.send_inner(amount_msat, node_id, route_parameters, Some(custom_tlvs), Some(preimage)) } fn send_inner( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, - custom_tlvs: Option>, preimage: Option, + &self, amount_msat: u64, node_id: PublicKey, + route_parameters: Option, custom_tlvs: Option>, + preimage: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); @@ -112,20 +113,19 @@ impl SpontaneousPayment { amount_msat, ); - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; + if let Some(RouteParametersConfig { + max_total_routing_fee_msat, + max_total_cltv_expiry_delta, + max_path_count, + max_channel_saturation_power_of_half, + }) = route_parameters.as_ref().or(self.config.route_parameters.as_ref()) + { + route_params.max_total_routing_fee_msat = *max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = *max_total_cltv_expiry_delta; + route_params.payment_params.max_path_count = *max_path_count; + route_params.payment_params.max_channel_saturation_power_of_half = + 
*max_channel_saturation_power_of_half; + } let recipient_fields = match custom_tlvs { Some(tlvs) => RecipientOnionFields::spontaneous_empty() diff --git a/src/payment/store.rs b/src/payment/store.rs index 75b2b1b2a..568394b48 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -9,13 +9,13 @@ use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::DecodeError; use lightning::offers::offer::OfferId; use lightning::util::ser::{Readable, Writeable}; -use lightning::util::string::UntrustedString; use lightning::{ _init_and_read_len_prefixed_tlv_fields, impl_writeable_tlv_based, impl_writeable_tlv_based_enum, write_tlv_fields, }; use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning_types::string::UntrustedString; use bitcoin::{BlockHash, Txid}; diff --git a/src/peer_store.rs b/src/peer_store.rs index 4d1c65157..cf3755d23 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -73,7 +73,7 @@ where PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, PEER_INFO_PERSISTENCE_KEY, - &data, + data, ) .map_err(|e| { log_error!( diff --git a/src/types.rs b/src/types.rs index 3103ead3f..b9bc1c317 100644 --- a/src/types.rs +++ b/src/types.rs @@ -25,20 +25,23 @@ use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::KVStore; +use lightning::util::persist::KVStoreSync; +use lightning::util::persist::KVStoreSyncWrapper; use lightning::util::ser::{Readable, Writeable, Writer}; -use lightning::util::sweep::OutputSweeper; +use lightning::util::sweep::OutputSweeper; use lightning_block_sync::gossip::{GossipVerifier, UtxoSource}; use lightning_net_tokio::SocketDescriptor; +use lightning_liquidity::utils::time::DefaultTimeProvider; + use bitcoin::secp256k1::PublicKey; use bitcoin::OutPoint; use std::sync::{Arc, 
Mutex}; -pub(crate) type DynStore = dyn KVStore + Sync + Send; +pub(crate) type DynStore = dyn KVStoreSync + Sync + Send; pub(crate) type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, @@ -47,6 +50,7 @@ pub(crate) type ChainMonitor = chainmonitor::ChainMonitor< Arc, Arc, Arc, + Arc, >; pub(crate) type PeerManager = lightning::ln::peer_handler::PeerManager< @@ -57,10 +61,16 @@ pub(crate) type PeerManager = lightning::ln::peer_handler::PeerManager< Arc, Arc>>, Arc, + Arc, >; -pub(crate) type LiquidityManager = - lightning_liquidity::LiquidityManager, Arc, Arc>; +pub(crate) type LiquidityManager = lightning_liquidity::LiquidityManager< + Arc, + Arc, + Arc, + Arc, + Arc, +>; pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager< Arc, @@ -76,11 +86,8 @@ pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager< pub(crate) type Broadcaster = crate::tx_broadcaster::TransactionBroadcaster>; -pub(crate) type Wallet = - crate::wallet::Wallet, Arc, Arc>; - -pub(crate) type KeysManager = - crate::wallet::WalletKeysManager, Arc, Arc>; +pub(crate) type Wallet = crate::wallet::Wallet; +pub(crate) type KeysManager = crate::wallet::WalletKeysManager; pub(crate) type Router = DefaultRouter< Arc, @@ -132,7 +139,7 @@ pub(crate) type Sweeper = OutputSweeper< Arc, Arc, Arc, - Arc, + KVStoreSyncWrapper>, Arc, Arc, >; diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index fbac1d1b6..c03353ef8 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -8,12 +8,12 @@ use persist::KVStoreWalletPersister; use crate::config::Config; -use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger}; +use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; +use crate::fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use crate::payment::store::ConfirmationStatus; use crate::payment::{PaymentDetails, PaymentDirection, 
PaymentStatus}; -use crate::types::PaymentStore; +use crate::types::{Broadcaster, PaymentStore}; use crate::Error; use lightning::chain::chaininterface::BroadcasterInterface; @@ -23,11 +23,11 @@ use lightning::chain::{BestBlock, Listen}; use lightning::events::bump_transaction::{Utxo, WalletSource}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::inbound_payment::ExpandedKey; -use lightning::ln::msgs::{DecodeError, UnsignedGossipMessage}; +use lightning::ln::msgs::UnsignedGossipMessage; use lightning::ln::script::ShutdownScript; use lightning::sign::{ ChangeDestinationSource, EntropySource, InMemorySigner, KeysManager, NodeSigner, OutputSpender, - Recipient, SignerProvider, SpendableOutputDescriptor, + PeerStorageKey, Recipient, SignerProvider, SpendableOutputDescriptor, }; use lightning::util::message_signing; @@ -44,13 +44,14 @@ use bitcoin::key::XOnlyPublicKey; use bitcoin::psbt::Psbt; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; -use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, Signing}; +use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey}; use bitcoin::{ Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, WitnessProgram, WitnessVersion, }; -use std::ops::Deref; +use std::future::Future; +use std::pin::Pin; use std::str::FromStr; use std::sync::{Arc, Mutex}; @@ -63,32 +64,23 @@ pub(crate) enum OnchainSendAmount { pub(crate) mod persist; pub(crate) mod ser; -pub(crate) struct Wallet -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +pub(crate) struct Wallet { // A BDK on-chain wallet. 
inner: Mutex>, persister: Mutex, - broadcaster: B, - fee_estimator: E, + broadcaster: Arc, + fee_estimator: Arc, payment_store: Arc, config: Arc, - logger: L, + logger: Arc, } -impl Wallet -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl Wallet { pub(crate) fn new( wallet: bdk_wallet::PersistedWallet, - wallet_persister: KVStoreWalletPersister, broadcaster: B, fee_estimator: E, - payment_store: Arc, config: Arc, logger: L, + wallet_persister: KVStoreWalletPersister, broadcaster: Arc, + fee_estimator: Arc, payment_store: Arc, + config: Arc, logger: Arc, ) -> Self { let inner = Mutex::new(wallet); let persister = Mutex::new(wallet_persister); @@ -318,7 +310,7 @@ where #[cfg(debug_assertions)] if balance.confirmed != Amount::ZERO { debug_assert!( - self.list_confirmed_utxos().map_or(false, |v| !v.is_empty()), + self.list_confirmed_utxos_inner().map_or(false, |v| !v.is_empty()), "Confirmed amounts should always be available for Anchor spending" ); } @@ -568,80 +560,8 @@ where Ok(txid) } -} - -impl Listen for Wallet -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ - fn filtered_block_connected( - &self, _header: &bitcoin::block::Header, - _txdata: &lightning::chain::transaction::TransactionData, _height: u32, - ) { - debug_assert!(false, "Syncing filtered blocks is currently not supported"); - // As far as we can tell this would be a no-op anyways as we don't have to tell BDK about - // the header chain of intermediate blocks. According to the BDK team, it's sufficient to - // only connect full blocks starting from the last point of disagreement. 
- } - - fn block_connected(&self, block: &bitcoin::Block, height: u32) { - let mut locked_wallet = self.inner.lock().unwrap(); - - let pre_checkpoint = locked_wallet.latest_checkpoint(); - if pre_checkpoint.height() != height - 1 - || pre_checkpoint.hash() != block.header.prev_blockhash - { - log_debug!( - self.logger, - "Detected reorg while applying a connected block to on-chain wallet: new block with hash {} at height {}", - block.header.block_hash(), - height - ); - } - match locked_wallet.apply_block(block, height) { - Ok(()) => { - if let Err(e) = self.update_payment_store(&mut *locked_wallet) { - log_error!(self.logger, "Failed to update payment store: {}", e); - return; - } - }, - Err(e) => { - log_error!( - self.logger, - "Failed to apply connected block to on-chain wallet: {}", - e - ); - return; - }, - }; - - let mut locked_persister = self.persister.lock().unwrap(); - match locked_wallet.persist(&mut locked_persister) { - Ok(_) => (), - Err(e) => { - log_error!(self.logger, "Failed to persist on-chain wallet: {}", e); - return; - }, - }; - } - - fn block_disconnected(&self, _header: &bitcoin::block::Header, _height: u32) { - // This is a no-op as we don't have to tell BDK about disconnections. According to the BDK - // team, it's sufficient in case of a reorg to always connect blocks starting from the last - // point of disagreement. 
- } -} - -impl WalletSource for Wallet -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ - fn list_confirmed_utxos(&self) -> Result, ()> { + fn list_confirmed_utxos_inner(&self) -> Result, ()> { let locked_wallet = self.inner.lock().unwrap(); let mut utxos = Vec::new(); let confirmed_txs: Vec = locked_wallet @@ -733,7 +653,7 @@ where Ok(utxos) } - fn get_change_script(&self) -> Result { + fn get_change_script_inner(&self) -> Result { let mut locked_wallet = self.inner.lock().unwrap(); let mut locked_persister = self.persister.lock().unwrap(); @@ -745,7 +665,7 @@ where Ok(address_info.address.script_pubkey()) } - fn sign_psbt(&self, mut psbt: Psbt) -> Result { + fn sign_psbt_inner(&self, mut psbt: Psbt) -> Result { let locked_wallet = self.inner.lock().unwrap(); // While BDK populates both `witness_utxo` and `non_witness_utxo` fields, LDK does not. As @@ -775,32 +695,102 @@ where } } +impl Listen for Wallet { + fn filtered_block_connected( + &self, _header: &bitcoin::block::Header, + _txdata: &lightning::chain::transaction::TransactionData, _height: u32, + ) { + debug_assert!(false, "Syncing filtered blocks is currently not supported"); + // As far as we can tell this would be a no-op anyways as we don't have to tell BDK about + // the header chain of intermediate blocks. According to the BDK team, it's sufficient to + // only connect full blocks starting from the last point of disagreement. 
+ } + + fn block_connected(&self, block: &bitcoin::Block, height: u32) { + let mut locked_wallet = self.inner.lock().unwrap(); + + let pre_checkpoint = locked_wallet.latest_checkpoint(); + if pre_checkpoint.height() != height - 1 + || pre_checkpoint.hash() != block.header.prev_blockhash + { + log_debug!( + self.logger, + "Detected reorg while applying a connected block to on-chain wallet: new block with hash {} at height {}", + block.header.block_hash(), + height + ); + } + + match locked_wallet.apply_block(block, height) { + Ok(()) => { + if let Err(e) = self.update_payment_store(&mut *locked_wallet) { + log_error!(self.logger, "Failed to update payment store: {}", e); + return; + } + }, + Err(e) => { + log_error!( + self.logger, + "Failed to apply connected block to on-chain wallet: {}", + e + ); + return; + }, + }; + + let mut locked_persister = self.persister.lock().unwrap(); + match locked_wallet.persist(&mut locked_persister) { + Ok(_) => (), + Err(e) => { + log_error!(self.logger, "Failed to persist on-chain wallet: {}", e); + return; + }, + }; + } + + fn blocks_disconnected(&self, _fork_point_block: BestBlock) { + // This is a no-op as we don't have to tell BDK about disconnections. According to the BDK + // team, it's sufficient in case of a reorg to always connect blocks starting from the last + // point of disagreement. + } +} + +impl WalletSource for Wallet { + fn list_confirmed_utxos<'a>( + &'a self, + ) -> Pin, ()>> + Send + 'a>> { + Box::pin(async move { self.list_confirmed_utxos_inner() }) + } + + fn get_change_script<'a>( + &'a self, + ) -> Pin> + Send + 'a>> { + Box::pin(async move { self.get_change_script_inner() }) + } + + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> Pin> + Send + 'a>> { + Box::pin(async move { self.sign_psbt_inner(psbt) }) + } +} + /// Similar to [`KeysManager`], but overrides the destination and shutdown scripts so they are /// directly spendable by the BDK wallet. 
-pub(crate) struct WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +pub(crate) struct WalletKeysManager { inner: KeysManager, - wallet: Arc>, - logger: L, + wallet: Arc, + logger: Arc, } -impl WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl WalletKeysManager { /// Constructs a `WalletKeysManager` that overrides the destination and shutdown scripts. /// /// See [`KeysManager::new`] for more information on `seed`, `starting_time_secs`, and /// `starting_time_nanos`. pub fn new( - seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, - wallet: Arc>, logger: L, + seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, wallet: Arc, + logger: Arc, ) -> Self { let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos); Self { inner, wallet, logger } @@ -819,12 +809,7 @@ where } } -impl NodeSigner for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl NodeSigner for WalletKeysManager { fn get_node_id(&self, recipient: Recipient) -> Result { self.inner.get_node_id(recipient) } @@ -835,8 +820,16 @@ where self.inner.ecdh(recipient, other_key, tweak) } - fn get_inbound_payment_key(&self) -> ExpandedKey { - self.inner.get_inbound_payment_key() + fn get_expanded_key(&self) -> ExpandedKey { + self.inner.get_expanded_key() + } + + fn get_peer_storage_key(&self) -> PeerStorageKey { + self.inner.get_peer_storage_key() + } + + fn get_receive_auth_key(&self) -> lightning::sign::ReceiveAuthKey { + self.inner.get_receive_auth_key() } fn sign_invoice( @@ -854,19 +847,17 @@ where ) -> Result { self.inner.sign_bolt12_invoice(invoice) } + fn sign_message(&self, msg: &[u8]) -> Result { + self.inner.sign_message(msg) + } } -impl OutputSpender for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: 
LdkLogger, -{ +impl OutputSpender for WalletKeysManager { /// See [`KeysManager::spend_spendable_outputs`] for documentation on this method. - fn spend_spendable_outputs( + fn spend_spendable_outputs( &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec, change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, - locktime: Option, secp_ctx: &Secp256k1, + locktime: Option, secp_ctx: &Secp256k1, ) -> Result { self.inner.spend_spendable_outputs( descriptors, @@ -879,39 +870,21 @@ where } } -impl EntropySource for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl EntropySource for WalletKeysManager { fn get_secure_random_bytes(&self) -> [u8; 32] { self.inner.get_secure_random_bytes() } } -impl SignerProvider for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl SignerProvider for WalletKeysManager { type EcdsaSigner = InMemorySigner; - fn generate_channel_keys_id( - &self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128, - ) -> [u8; 32] { - self.inner.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id) - } - - fn derive_channel_signer( - &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32], - ) -> Self::EcdsaSigner { - self.inner.derive_channel_signer(channel_value_satoshis, channel_keys_id) + fn generate_channel_keys_id(&self, inbound: bool, user_channel_id: u128) -> [u8; 32] { + self.inner.generate_channel_keys_id(inbound, user_channel_id) } - fn read_chan_signer(&self, reader: &[u8]) -> Result { - self.inner.read_chan_signer(reader) + fn derive_channel_signer(&self, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner { + self.inner.derive_channel_signer(channel_keys_id) } fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { @@ -941,16 +914,20 @@ where } } -impl ChangeDestinationSource for WalletKeysManager -where - B::Target: 
BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ - fn get_change_destination_script(&self) -> Result { - let address = self.wallet.get_new_internal_address().map_err(|e| { - log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); - })?; - Ok(address.script_pubkey()) +impl ChangeDestinationSource for WalletKeysManager { + fn get_change_destination_script<'a>( + &self, + ) -> Pin> + Send + 'a>> { + let wallet = Arc::clone(&self.wallet); + let logger = Arc::clone(&self.logger); + Box::pin(async move { + wallet + .get_new_internal_address() + .map_err(|e| { + log_error!(logger, "Failed to retrieve new address from wallet: {}", e); + }) + .map(|addr| addr.script_pubkey()) + .map_err(|_| ()) + }) } } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 780e9bbf4..f5bfe76fc 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -21,7 +21,7 @@ use ldk_node::{ use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; -use lightning::util::persist::KVStore; +use lightning::util::persist::KVStoreSync; use lightning::util::test_utils::TestStore; use lightning_invoice::{Bolt11InvoiceDescription, Description}; @@ -1236,7 +1236,7 @@ impl TestSyncStore { } } -impl KVStore for TestSyncStore { +impl KVStoreSync for TestSyncStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> lightning::io::Result> { @@ -1263,12 +1263,14 @@ impl KVStore for TestSyncStore { } fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); - let fs_res = self.fs_store.write(primary_namespace, secondary_namespace, key, buf); - let sqlite_res = self.sqlite_store.write(primary_namespace, secondary_namespace, key, buf); - let test_res = self.test_store.write(primary_namespace, 
secondary_namespace, key, buf); + let fs_res = self.fs_store.write(primary_namespace, secondary_namespace, key, buf.clone()); + let sqlite_res = + self.sqlite_store.write(primary_namespace, secondary_namespace, key, buf.clone()); + let test_res = + self.test_store.write(primary_namespace, secondary_namespace, key, buf.clone()); assert!(self .do_list(primary_namespace, secondary_namespace) diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 0932116ef..fa88fe0cc 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -22,13 +22,14 @@ use ldk_node::config::EsploraSyncConfig; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, - QrPaymentResult, SendingParameters, + QrPaymentResult, }; use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; -use lightning::util::persist::KVStore; +use lightning::routing::router::RouteParametersConfig; +use lightning::util::persist::KVStoreSync; use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; @@ -212,11 +213,11 @@ fn multi_hop_sending() { // Sleep a bit for gossip to propagate. 
std::thread::sleep(std::time::Duration::from_secs(1)); - let sending_params = SendingParameters { - max_total_routing_fee_msat: Some(Some(75_000).into()), - max_total_cltv_expiry_delta: Some(1000), - max_path_count: Some(10), - max_channel_saturation_power_of_half: Some(2), + let route_params = RouteParametersConfig { + max_total_routing_fee_msat: Some(75_000), + max_total_cltv_expiry_delta: 1000, + max_path_count: 10, + max_channel_saturation_power_of_half: 2, }; let invoice_description = @@ -225,7 +226,7 @@ fn multi_hop_sending() { .bolt11_payment() .receive(2_500_000, &invoice_description.clone().into(), 9217) .unwrap(); - nodes[0].bolt11_payment().send(&invoice, Some(sending_params)).unwrap(); + nodes[0].bolt11_payment().send(&invoice, Some(route_params)).unwrap(); expect_event!(nodes[1], PaymentForwarded); @@ -246,7 +247,7 @@ fn start_stop_reinit() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let test_sync_store: Arc = + let test_sync_store: Arc = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.clone().into())); let sync_config = EsploraSyncConfig { background_sync_config: None }; From 4b45d7c1e6f3494e23f005ce04eb8761d06ec6af Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 15 Aug 2025 09:42:47 +0200 Subject: [PATCH 071/184] Switch to use `rustls-ring` everywhere We switch to use `rustls-ring` everywhere, which is necessary for Swift builds, but also generally makes our lives easier. 
--- .github/workflows/kotlin.yml | 3 --- Cargo.toml | 12 ++++++------ src/builder.rs | 2 +- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/.github/workflows/kotlin.yml b/.github/workflows/kotlin.yml index 5cb1b8c27..a1711ba49 100644 --- a/.github/workflows/kotlin.yml +++ b/.github/workflows/kotlin.yml @@ -39,9 +39,6 @@ jobs: - name: Generate Kotlin JVM run: ./scripts/uniffi_bindgen_generate_kotlin.sh - - name: Install `bindgen-cli` - run: cargo install --force bindgen-cli - - name: Generate Kotlin Android run: ./scripts/uniffi_bindgen_generate_kotlin_android.sh diff --git a/Cargo.toml b/Cargo.toml index aaaa55f39..9010ad6d5 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,7 +36,7 @@ default = [] #lightning-background-processor = { version = "0.1.0" } #lightning-rapid-gossip-sync = { version = "0.1.0" } #lightning-block-sync = { version = "0.1.0", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { version = "0.1.0", features = ["esplora-async-https", "time", "electrum"] } +#lightning-transaction-sync = { version = "0.1.0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } #lightning-liquidity = { version = "0.1.0", features = ["std"] } #lightning-macros = { version = "0.1.0" } @@ -48,7 +48,7 @@ default = [] #lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["esplora-async-https", "electrum", "time"] } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["esplora-async-https", 
"electrum-rustls-ring", "time"] } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } @@ -60,7 +60,7 @@ lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["esplora-async-https", "electrum", "time"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } @@ -72,13 +72,13 @@ lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", #lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } #lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } #lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = 
["esplora-async-https", "electrum", "time"] } +#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } #lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } #lightning-macros = { path = "../rust-lightning/lightning-macros" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} -bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls"]} +bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls-ring"]} bdk_wallet = { version = "2.0.0", default-features = false, features = ["std", "keys-bip39"]} reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } @@ -93,7 +93,7 @@ rand = "0.8.5" chrono = { version = "0.4", default-features = false, features = ["clock"] } tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } esplora-client = { version = "0.12", default-features = false, features = ["tokio", "async-https-rustls"] } -electrum-client = { version = "0.24.0", default-features = true } +electrum-client = { version = "0.24.0", default-features = false, features = ["proxy", "use-rustls-ring"] } libc = "0.2" uniffi = { version = "0.28.3", features = ["build"], optional = true } serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] } diff --git a/src/builder.rs b/src/builder.rs index 094c21e72..a46b182e1 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1688,7 +1688,7 @@ fn optionally_install_rustls_cryptoprovider() { INIT_CRYPTO.call_once(|| { // Ensure we always install a `CryptoProvider` for `rustls` if it was somehow not previously installed by now. 
if rustls::crypto::CryptoProvider::get_default().is_none() { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + let _ = rustls::crypto::ring::default_provider().install_default(); } // Refuse to startup without TLS support. Better to catch it now than even later at runtime. From 80ac9f35eaf39847ed6c2438df1c25e0299a6807 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 3 Sep 2025 12:46:07 +0200 Subject: [PATCH 072/184] Use log timestamps with millisecond resolution Helpful to correlate multiple log files throughout time --- src/logger.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logger.rs b/src/logger.rs index bbd24ec20..40817897c 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -124,7 +124,7 @@ impl LogWriter for Writer { let log = format!( "{} {:<5} [{}:{}] {}\n", - Utc::now().format("%Y-%m-%d %H:%M:%S"), + Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), record.level.to_string(), record.module_path, record.line, From 66f5c28ca8663a3c9e5b7562660192f3804bec4f Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 19 Aug 2025 15:37:31 +0200 Subject: [PATCH 073/184] Add static invoice support This commit adds support for using ldk-node as a static invoice server. When configured as such, the node persists and retrieves invoices from the configured kv store. Access is guarded by a rate limiter to prevent overload and mitigate potential DoS attacks. In this mode, ldk-node also exposes blinded paths that can be shared with async recipients, allowing them to contact the static invoice server. When ldk-node functions as a recipient, it can communicate with the static invoice server to set up async payments. 
--- bindings/ldk_node.udl | 9 + src/builder.rs | 2 +- src/config.rs | 3 + src/error.rs | 8 + src/event.rs | 63 +++- src/io/mod.rs | 5 + src/lib.rs | 10 + src/payment/asynchronous/mod.rs | 9 + src/payment/asynchronous/rate_limiter.rs | 96 ++++++ .../asynchronous/static_invoice_store.rs | 277 ++++++++++++++++++ src/payment/bolt12.rs | 105 ++++++- src/payment/mod.rs | 1 + src/types.rs | 2 +- tests/integration_tests_rust.rs | 95 ++++++ 14 files changed, 672 insertions(+), 13 deletions(-) create mode 100644 src/payment/asynchronous/mod.rs create mode 100644 src/payment/asynchronous/rate_limiter.rs create mode 100644 src/payment/asynchronous/static_invoice_store.rs diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index b9bab61e8..9f0ef697e 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -13,6 +13,7 @@ dictionary Config { u64 probing_liquidity_limit_multiplier; AnchorChannelsConfig? anchor_channels_config; RouteParametersConfig? route_parameters; + boolean async_payment_services_enabled; }; dictionary AnchorChannelsConfig { @@ -209,6 +210,12 @@ interface Bolt12Payment { Bolt12Invoice request_refund_payment([ByRef]Refund refund); [Throws=NodeError] Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? 
payer_note); + [Throws=NodeError] + Offer receive_async(); + [Throws=NodeError] + void set_paths_to_static_invoice_server(bytes paths); + [Throws=NodeError] + bytes blinded_paths_for_async_recipient(bytes recipient_id); }; interface SpontaneousPayment { @@ -311,6 +318,8 @@ enum NodeError { "InsufficientFunds", "LiquiditySourceUnavailable", "LiquidityFeeTooHigh", + "InvalidBlindedPaths", + "AsyncPaymentServicesDisabled", }; dictionary NodeStatus { diff --git a/src/builder.rs b/src/builder.rs index a46b182e1..d330597ee 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1455,7 +1455,7 @@ fn build_with_store_internal( Arc::clone(&channel_manager), message_router, Arc::clone(&channel_manager), - IgnoringMessageHandler {}, + Arc::clone(&channel_manager), IgnoringMessageHandler {}, IgnoringMessageHandler {}, )); diff --git a/src/config.rs b/src/config.rs index 84f62d220..bb0bd56ba 100644 --- a/src/config.rs +++ b/src/config.rs @@ -179,6 +179,8 @@ pub struct Config { /// **Note:** If unset, default parameters will be used, and you will be able to override the /// parameters on a per-payment basis in the corresponding method calls. pub route_parameters: Option, + /// Whether to enable the static invoice service to support async payment reception for clients. + pub async_payment_services_enabled: bool, } impl Default for Config { @@ -193,6 +195,7 @@ impl Default for Config { anchor_channels_config: Some(AnchorChannelsConfig::default()), route_parameters: None, node_alias: None, + async_payment_services_enabled: false, } } } diff --git a/src/error.rs b/src/error.rs index 2cb71186d..eaa022e56 100644 --- a/src/error.rs +++ b/src/error.rs @@ -120,6 +120,10 @@ pub enum Error { LiquiditySourceUnavailable, /// The given operation failed due to the LSP's required opening fee being too high. LiquidityFeeTooHigh, + /// The given blinded paths are invalid. + InvalidBlindedPaths, + /// Asynchronous payment services are disabled. 
+ AsyncPaymentServicesDisabled, } impl fmt::Display for Error { @@ -193,6 +197,10 @@ impl fmt::Display for Error { Self::LiquidityFeeTooHigh => { write!(f, "The given operation failed due to the LSP's required opening fee being too high.") }, + Self::InvalidBlindedPaths => write!(f, "The given blinded paths are invalid."), + Self::AsyncPaymentServicesDisabled => { + write!(f, "Asynchronous payment services are disabled.") + }, } } } diff --git a/src/event.rs b/src/event.rs index bad1b84ab..7a6dc4832 100644 --- a/src/event.rs +++ b/src/event.rs @@ -6,7 +6,6 @@ // accordance with one or both of these licenses. use crate::types::{CustomTlvRecord, DynStore, PaymentStore, Sweeper, Wallet}; - use crate::{ hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, UserChannelId, @@ -19,6 +18,7 @@ use crate::fee_estimator::ConfirmationTarget; use crate::liquidity::LiquiditySource; use crate::logger::Logger; +use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; use crate::payment::store::{ PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, }; @@ -27,7 +27,7 @@ use crate::io::{ EVENT_QUEUE_PERSISTENCE_KEY, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, }; -use crate::logger::{log_debug, log_error, log_info, LdkLogger}; +use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger}; use crate::runtime::Runtime; @@ -458,6 +458,7 @@ where runtime: Arc, logger: L, config: Arc, + static_invoice_store: Option, } impl EventHandler @@ -470,8 +471,9 @@ where channel_manager: Arc, connection_manager: Arc>, output_sweeper: Arc, network_graph: Arc, liquidity_source: Option>>>, - payment_store: Arc, peer_store: Arc>, runtime: Arc, - logger: L, config: Arc, + payment_store: Arc, peer_store: Arc>, + static_invoice_store: Option, runtime: Arc, logger: L, + config: Arc, ) -> Self { Self { event_queue, @@ -487,6 +489,7 @@ where logger, runtime, config, + 
static_invoice_store, } } @@ -1494,11 +1497,55 @@ where LdkEvent::OnionMessagePeerConnected { .. } => { debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); }, - LdkEvent::PersistStaticInvoice { .. } => { - debug_assert!(false, "We currently don't support static invoice persistence, so this event should never be emitted."); + + LdkEvent::PersistStaticInvoice { + invoice, + invoice_slot, + recipient_id, + invoice_persisted_path, + } => { + if let Some(store) = self.static_invoice_store.as_ref() { + match store + .handle_persist_static_invoice(invoice, invoice_slot, recipient_id) + .await + { + Ok(_) => { + self.channel_manager.static_invoice_persisted(invoice_persisted_path); + }, + Err(e) => { + log_error!(self.logger, "Failed to persist static invoice: {}", e); + return Err(ReplayEvent()); + }, + }; + } }, - LdkEvent::StaticInvoiceRequested { .. } => { - debug_assert!(false, "We currently don't support static invoice persistence, so this event should never be emitted."); + LdkEvent::StaticInvoiceRequested { recipient_id, invoice_slot, reply_path } => { + if let Some(store) = self.static_invoice_store.as_ref() { + let invoice = + store.handle_static_invoice_requested(&recipient_id, invoice_slot).await; + + match invoice { + Ok(Some(invoice)) => { + if let Err(e) = + self.channel_manager.send_static_invoice(invoice, reply_path) + { + log_error!(self.logger, "Failed to send static invoice: {:?}", e); + } + }, + Ok(None) => { + log_trace!( + self.logger, + "No static invoice found for recipient {} and slot {}", + hex_utils::to_string(&recipient_id), + invoice_slot + ); + }, + Err(e) => { + log_error!(self.logger, "Failed to retrieve static invoice: {}", e); + return Err(ReplayEvent()); + }, + } + } }, LdkEvent::FundingTransactionReadyForSigning { .. 
} => { debug_assert!(false, "We currently don't support interactive-tx, so this event should never be emitted."); diff --git a/src/io/mod.rs b/src/io/mod.rs index 7a52a5c98..38fba5114 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -73,3 +73,8 @@ pub(crate) const BDK_WALLET_TX_GRAPH_KEY: &str = "tx_graph"; pub(crate) const BDK_WALLET_INDEXER_PRIMARY_NAMESPACE: &str = "bdk_wallet"; pub(crate) const BDK_WALLET_INDEXER_SECONDARY_NAMESPACE: &str = ""; pub(crate) const BDK_WALLET_INDEXER_KEY: &str = "indexer"; + +/// [`StaticInvoice`]s will be persisted under this key. +/// +/// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice +pub(crate) const STATIC_INVOICE_STORE_PRIMARY_NAMESPACE: &str = "static_invoices"; diff --git a/src/lib.rs b/src/lib.rs index 160762dd2..e7e27273b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -136,6 +136,7 @@ use gossip::GossipSource; use graph::NetworkGraph; use io::utils::write_node_metrics; use liquidity::{LSPS1Liquidity, LiquiditySource}; +use payment::asynchronous::static_invoice_store::StaticInvoiceStore; use payment::{ Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment, UnifiedQrPayment, @@ -498,6 +499,12 @@ impl Node { Arc::clone(&self.logger), )); + let static_invoice_store = if self.config.async_payment_services_enabled { + Some(StaticInvoiceStore::new(Arc::clone(&self.kv_store))) + } else { + None + }; + let event_handler = Arc::new(EventHandler::new( Arc::clone(&self.event_queue), Arc::clone(&self.wallet), @@ -509,6 +516,7 @@ impl Node { self.liquidity_source.clone(), Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), + static_invoice_store, Arc::clone(&self.runtime), Arc::clone(&self.logger), Arc::clone(&self.config), @@ -818,6 +826,7 @@ impl Node { Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), ) @@ -831,6 +840,7 @@ impl Node { 
Arc::new(Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), )) diff --git a/src/payment/asynchronous/mod.rs b/src/payment/asynchronous/mod.rs new file mode 100644 index 000000000..ebb7a4bd3 --- /dev/null +++ b/src/payment/asynchronous/mod.rs @@ -0,0 +1,9 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +mod rate_limiter; +pub(crate) mod static_invoice_store; diff --git a/src/payment/asynchronous/rate_limiter.rs b/src/payment/asynchronous/rate_limiter.rs new file mode 100644 index 000000000..153577b16 --- /dev/null +++ b/src/payment/asynchronous/rate_limiter.rs @@ -0,0 +1,96 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! [`RateLimiter`] to control the rate of requests from users. + +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +/// Implements a leaky-bucket style rate limiter parameterized by the max capacity of the bucket, the refill interval, +/// and the max idle duration. +/// +/// For every passing of the refill interval, one token is added to the bucket, up to the maximum capacity. When the +/// bucket has remained at the maximum capacity for longer than the max idle duration, it is removed to prevent memory +/// leakage. 
+pub(crate) struct RateLimiter { + users: HashMap, Bucket>, + capacity: u32, + refill_interval: Duration, + max_idle: Duration, +} + +struct Bucket { + tokens: u32, + last_refill: Instant, +} + +impl RateLimiter { + pub(crate) fn new(capacity: u32, refill_interval: Duration, max_idle: Duration) -> Self { + Self { users: HashMap::new(), capacity, refill_interval, max_idle } + } + + pub(crate) fn allow(&mut self, user_id: &[u8]) -> bool { + let now = Instant::now(); + + let entry = self.users.entry(user_id.to_vec()); + let is_new_user = matches!(entry, std::collections::hash_map::Entry::Vacant(_)); + + let bucket = entry.or_insert(Bucket { tokens: self.capacity, last_refill: now }); + + let elapsed = now.duration_since(bucket.last_refill); + let tokens_to_add = (elapsed.as_secs_f64() / self.refill_interval.as_secs_f64()) as u32; + + if tokens_to_add > 0 { + bucket.tokens = (bucket.tokens + tokens_to_add).min(self.capacity); + bucket.last_refill = now; + } + + let allow = if bucket.tokens > 0 { + bucket.tokens -= 1; + true + } else { + false + }; + + // Each time a new user is added, we take the opportunity to clean up old rate limits. 
+ if is_new_user { + self.garbage_collect(self.max_idle); + } + + allow + } + + fn garbage_collect(&mut self, max_idle: Duration) { + let now = Instant::now(); + self.users.retain(|_, bucket| now.duration_since(bucket.last_refill) < max_idle); + } +} + +#[cfg(test)] +mod tests { + use crate::payment::asynchronous::rate_limiter::RateLimiter; + + use std::time::Duration; + + #[test] + fn rate_limiter_test() { + // Test + let mut rate_limiter = + RateLimiter::new(3, Duration::from_millis(100), Duration::from_secs(1)); + + assert!(rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user1")); + assert!(!rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user2")); + + std::thread::sleep(Duration::from_millis(150)); + + assert!(rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user2")); + } +} diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs new file mode 100644 index 000000000..eed6720e5 --- /dev/null +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -0,0 +1,277 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! Store implementation for [`StaticInvoice`]s. 
+ +use crate::hex_utils; +use crate::io::STATIC_INVOICE_STORE_PRIMARY_NAMESPACE; +use crate::payment::asynchronous::rate_limiter::RateLimiter; +use crate::types::DynStore; + +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; + +use lightning::{offers::static_invoice::StaticInvoice, util::ser::Writeable}; + +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +pub(crate) struct StaticInvoiceStore { + kv_store: Arc, + request_rate_limiter: Mutex, + persist_rate_limiter: Mutex, +} + +impl StaticInvoiceStore { + const RATE_LIMITER_BUCKET_CAPACITY: u32 = 5; + const RATE_LIMITER_REFILL_INTERVAL: Duration = Duration::from_millis(100); + const RATE_LIMITER_MAX_IDLE: Duration = Duration::from_secs(600); + + pub(crate) fn new(kv_store: Arc) -> Self { + Self { + kv_store, + request_rate_limiter: Mutex::new(RateLimiter::new( + Self::RATE_LIMITER_BUCKET_CAPACITY, + Self::RATE_LIMITER_REFILL_INTERVAL, + Self::RATE_LIMITER_MAX_IDLE, + )), + persist_rate_limiter: Mutex::new(RateLimiter::new( + Self::RATE_LIMITER_BUCKET_CAPACITY, + Self::RATE_LIMITER_REFILL_INTERVAL, + Self::RATE_LIMITER_MAX_IDLE, + )), + } + } + + fn check_rate_limit( + limiter: &Mutex, recipient_id: &[u8], + ) -> Result<(), lightning::io::Error> { + let mut limiter = limiter.lock().unwrap(); + if !limiter.allow(recipient_id) { + Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, "Rate limit exceeded")) + } else { + Ok(()) + } + } + + pub(crate) async fn handle_static_invoice_requested( + &self, recipient_id: &[u8], invoice_slot: u16, + ) -> Result, lightning::io::Error> { + Self::check_rate_limit(&self.request_rate_limiter, &recipient_id)?; + + let (secondary_namespace, key) = Self::get_storage_location(invoice_slot, recipient_id); + + self.kv_store + .read(STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, &secondary_namespace, &key) + .and_then(|data| { + data.try_into().map(Some).map_err(|e| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + format!("Failed 
to parse static invoice: {:?}", e), + ) + }) + }) + .or_else( + |e| { + if e.kind() == lightning::io::ErrorKind::NotFound { + Ok(None) + } else { + Err(e) + } + }, + ) + } + + pub(crate) async fn handle_persist_static_invoice( + &self, invoice: StaticInvoice, invoice_slot: u16, recipient_id: Vec, + ) -> Result<(), lightning::io::Error> { + Self::check_rate_limit(&self.persist_rate_limiter, &recipient_id)?; + + let (secondary_namespace, key) = Self::get_storage_location(invoice_slot, &recipient_id); + + let mut buf = Vec::new(); + invoice.write(&mut buf)?; + + // Static invoices will be persisted at "static_invoices//". + // + // Example: static_invoices/039058c6f2c0cb492c533b0a4d14ef77cc0f78abccced5287d84a1a2011cfb81/00001 + self.kv_store.write(STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, &secondary_namespace, &key, buf) + } + + fn get_storage_location(invoice_slot: u16, recipient_id: &[u8]) -> (String, String) { + let hash = Sha256::hash(recipient_id).to_byte_array(); + let secondary_namespace = hex_utils::to_string(&hash); + + let key = format!("{:05}", invoice_slot); + (secondary_namespace, key) + } +} + +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use bitcoin::{ + key::{Keypair, Secp256k1}, + secp256k1::{PublicKey, SecretKey}, + }; + use lightning::blinded_path::{ + message::BlindedMessagePath, + payment::{BlindedPayInfo, BlindedPaymentPath}, + BlindedHop, + }; + use lightning::ln::inbound_payment::ExpandedKey; + use lightning::offers::{ + nonce::Nonce, + offer::OfferBuilder, + static_invoice::{StaticInvoice, StaticInvoiceBuilder}, + }; + use lightning::sign::EntropySource; + use lightning::util::test_utils::TestStore; + use lightning_types::features::BlindedHopFeatures; + + use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; + use crate::types::DynStore; + + #[tokio::test] + async fn static_invoice_store_test() { + let store: Arc = Arc::new(TestStore::new(false)); + let static_invoice_store = 
StaticInvoiceStore::new(Arc::clone(&store)); + + let static_invoice = invoice(); + let recipient_id = vec![1, 1, 1]; + assert!(static_invoice_store + .handle_persist_static_invoice(static_invoice.clone(), 0, recipient_id.clone()) + .await + .is_ok()); + + let requested_invoice = + static_invoice_store.handle_static_invoice_requested(&recipient_id, 0).await.unwrap(); + + assert_eq!(requested_invoice.unwrap(), static_invoice); + + assert!(static_invoice_store + .handle_static_invoice_requested(&recipient_id, 1) + .await + .unwrap() + .is_none()); + + assert!(static_invoice_store + .handle_static_invoice_requested(&[2, 2, 2], 0) + .await + .unwrap() + .is_none()); + } + + fn invoice() -> StaticInvoice { + let node_id = recipient_pubkey(); + let payment_paths = payment_paths(); + let now = now(); + let expanded_key = ExpandedKey::new([42; 32]); + let entropy = FixedEntropy {}; + let nonce = Nonce::from_entropy_source(&entropy); + let secp_ctx = Secp256k1::new(); + + let offer = OfferBuilder::deriving_signing_pubkey(node_id, &expanded_key, nonce, &secp_ctx) + .path(blinded_path()) + .build() + .unwrap(); + + StaticInvoiceBuilder::for_offer_using_derived_keys( + &offer, + payment_paths.clone(), + vec![blinded_path()], + now, + &expanded_key, + nonce, + &secp_ctx, + ) + .unwrap() + .build_and_sign(&secp_ctx) + .unwrap() + } + + fn now() -> Duration { + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH") + } + + fn payment_paths() -> Vec { + vec![ + BlindedPaymentPath::from_blinded_path_and_payinfo( + pubkey(40), + pubkey(41), + vec![ + BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] }, + BlindedHop { blinded_node_id: pubkey(44), encrypted_payload: vec![0; 44] }, + ], + BlindedPayInfo { + fee_base_msat: 1, + fee_proportional_millionths: 1_000, + cltv_expiry_delta: 42, + htlc_minimum_msat: 100, + htlc_maximum_msat: 1_000_000_000_000, + features: 
BlindedHopFeatures::empty(), + }, + ), + BlindedPaymentPath::from_blinded_path_and_payinfo( + pubkey(40), + pubkey(41), + vec![ + BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] }, + BlindedHop { blinded_node_id: pubkey(46), encrypted_payload: vec![0; 46] }, + ], + BlindedPayInfo { + fee_base_msat: 1, + fee_proportional_millionths: 1_000, + cltv_expiry_delta: 42, + htlc_minimum_msat: 100, + htlc_maximum_msat: 1_000_000_000_000, + features: BlindedHopFeatures::empty(), + }, + ), + ] + } + + fn blinded_path() -> BlindedMessagePath { + BlindedMessagePath::from_blinded_path( + pubkey(40), + pubkey(41), + vec![ + BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] }, + BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 44] }, + ], + ) + } + + fn pubkey(byte: u8) -> PublicKey { + let secp_ctx = Secp256k1::new(); + PublicKey::from_secret_key(&secp_ctx, &privkey(byte)) + } + + fn privkey(byte: u8) -> SecretKey { + SecretKey::from_slice(&[byte; 32]).unwrap() + } + + fn recipient_keys() -> Keypair { + let secp_ctx = Secp256k1::new(); + Keypair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap()) + } + + fn recipient_pubkey() -> PublicKey { + recipient_keys().public_key() + } + + struct FixedEntropy; + + impl EntropySource for FixedEntropy { + fn get_secure_random_bytes(&self) -> [u8; 32] { + [42; 32] + } + } +} diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 4e968deb7..81349e2bd 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -9,18 +9,21 @@ //! //! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -use crate::config::LDK_PAYMENT_RETRY_TIMEOUT; +use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; use crate::ffi::{maybe_deref, maybe_wrap}; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; use crate::types::{ChannelManager, PaymentStore}; +use lightning::blinded_path::message::BlindedMessagePath; use lightning::ln::channelmanager::{PaymentId, Retry}; use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::routing::router::RouteParametersConfig; +#[cfg(feature = "uniffi")] +use lightning::util::ser::{Readable, Writeable}; use lightning_types::string::UntrustedString; use rand::RngCore; @@ -54,15 +57,16 @@ pub struct Bolt12Payment { channel_manager: Arc, payment_store: Arc, is_running: Arc>, + config: Arc, logger: Arc, } impl Bolt12Payment { pub(crate) fn new( channel_manager: Arc, payment_store: Arc, - is_running: Arc>, logger: Arc, + config: Arc, is_running: Arc>, logger: Arc, ) -> Self { - Self { channel_manager, payment_store, is_running, logger } + Self { channel_manager, payment_store, config, is_running, logger } } /// Send a payment given an offer. @@ -450,4 +454,99 @@ impl Bolt12Payment { Ok(maybe_wrap(refund)) } + + /// Retrieve an [`Offer`] for receiving async payments as an often-offline recipient. + /// + /// Will only return an offer if [`Bolt12Payment::set_paths_to_static_invoice_server`] was called and we succeeded + /// in interactively building a [`StaticInvoice`] with the static invoice server. + /// + /// Useful for posting offers to receive payments later, such as posting an offer on a website. + /// + /// **Caution**: Async payments support is considered experimental. 
+ /// + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + /// [`Offer`]: lightning::offers::offer::Offer + pub fn receive_async(&self) -> Result { + self.channel_manager + .get_async_receive_offer() + .map(maybe_wrap) + .or(Err(Error::OfferCreationFailed)) + } + + /// Sets the [`BlindedMessagePath`]s that we will use as an async recipient to interactively build [`Offer`]s with a + /// static invoice server, so the server can serve [`StaticInvoice`]s to payers on our behalf when we're offline. + /// + /// **Caution**: Async payments support is considered experimental. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(not(feature = "uniffi"))] + pub fn set_paths_to_static_invoice_server( + &self, paths: Vec, + ) -> Result<(), Error> { + self.channel_manager + .set_paths_to_static_invoice_server(paths) + .or(Err(Error::InvalidBlindedPaths)) + } + + /// Sets the [`BlindedMessagePath`]s that we will use as an async recipient to interactively build [`Offer`]s with a + /// static invoice server, so the server can serve [`StaticInvoice`]s to payers on our behalf when we're offline. + /// + /// **Caution**: Async payments support is considered experimental. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(feature = "uniffi")] + pub fn set_paths_to_static_invoice_server(&self, paths: Vec) -> Result<(), Error> { + let decoded_paths = as Readable>::read(&mut &paths[..]) + .or(Err(Error::InvalidBlindedPaths))?; + + self.channel_manager + .set_paths_to_static_invoice_server(decoded_paths) + .or(Err(Error::InvalidBlindedPaths)) + } + + /// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively + /// build [`Offer`]s and [`StaticInvoice`]s for receiving async payments. + /// + /// **Caution**: Async payments support is considered experimental. 
+ /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(not(feature = "uniffi"))] + pub fn blinded_paths_for_async_recipient( + &self, recipient_id: Vec, + ) -> Result, Error> { + self.blinded_paths_for_async_recipient_internal(recipient_id) + } + + /// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively + /// build [`Offer`]s and [`StaticInvoice`]s for receiving async payments. + /// + /// **Caution**: Async payments support is considered experimental. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(feature = "uniffi")] + pub fn blinded_paths_for_async_recipient( + &self, recipient_id: Vec, + ) -> Result, Error> { + let paths = self.blinded_paths_for_async_recipient_internal(recipient_id)?; + + let mut bytes = Vec::new(); + paths.write(&mut bytes).or(Err(Error::InvalidBlindedPaths))?; + Ok(bytes) + } + + fn blinded_paths_for_async_recipient_internal( + &self, recipient_id: Vec, + ) -> Result, Error> { + if !self.config.async_payment_services_enabled { + return Err(Error::AsyncPaymentServicesDisabled); + } + + self.channel_manager + .blinded_paths_for_async_recipient(recipient_id, None) + .or(Err(Error::InvalidBlindedPaths)) + } } diff --git a/src/payment/mod.rs b/src/payment/mod.rs index 54f7894dc..f629960e1 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -7,6 +7,7 @@ //! Objects for different types of payments. 
+pub(crate) mod asynchronous; mod bolt11; mod bolt12; mod onchain; diff --git a/src/types.rs b/src/types.rs index b9bc1c317..3635badff 100644 --- a/src/types.rs +++ b/src/types.rs @@ -123,7 +123,7 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMesse Arc, Arc, Arc, - IgnoringMessageHandler, + Arc, IgnoringMessageHandler, IgnoringMessageHandler, >; diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index fa88fe0cc..77f46091d 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -1130,6 +1130,101 @@ fn simple_bolt12_send_receive() { assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } +#[test] +fn static_invoice_server() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + + let config_sender = random_config(true); + let node_sender = setup_node(&chain_source, config_sender, None); + + let config_sender_lsp = random_config(true); + let node_sender_lsp = setup_node(&chain_source, config_sender_lsp, None); + + let mut config_receiver_lsp = random_config(true); + config_receiver_lsp.node_config.async_payment_services_enabled = true; + let node_receiver_lsp = setup_node(&chain_source, config_receiver_lsp, None); + + let config_receiver = random_config(true); + let node_receiver = setup_node(&chain_source, config_receiver, None); + + let address_sender = node_sender.onchain_payment().new_address().unwrap(); + let address_sender_lsp = node_sender_lsp.onchain_payment().new_address().unwrap(); + let address_receiver_lsp = node_receiver_lsp.onchain_payment().new_address().unwrap(); + let address_receiver = node_receiver.onchain_payment().new_address().unwrap(); + let premine_amount_sat = 4_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_sender, address_sender_lsp, address_receiver_lsp, address_receiver], + Amount::from_sat(premine_amount_sat), 
+ ); + + node_sender.sync_wallets().unwrap(); + node_sender_lsp.sync_wallets().unwrap(); + node_receiver_lsp.sync_wallets().unwrap(); + node_receiver.sync_wallets().unwrap(); + + open_channel(&node_sender, &node_sender_lsp, 400_000, true, &electrsd); + open_channel(&node_sender_lsp, &node_receiver_lsp, 400_000, true, &electrsd); + open_channel(&node_receiver_lsp, &node_receiver, 400_000, true, &electrsd); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + + node_sender.sync_wallets().unwrap(); + node_sender_lsp.sync_wallets().unwrap(); + node_receiver_lsp.sync_wallets().unwrap(); + node_receiver.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_sender, node_sender_lsp.node_id()); + expect_channel_ready_event!(node_sender_lsp, node_sender.node_id()); + expect_channel_ready_event!(node_sender_lsp, node_receiver_lsp.node_id()); + expect_channel_ready_event!(node_receiver_lsp, node_sender_lsp.node_id()); + expect_channel_ready_event!(node_receiver_lsp, node_receiver.node_id()); + expect_channel_ready_event!(node_receiver, node_receiver_lsp.node_id()); + + let has_node_announcements = |node: &ldk_node::Node| { + node.network_graph() + .list_nodes() + .iter() + .filter(|n| { + node.network_graph().node(n).map_or(false, |info| info.announcement_info.is_some()) + }) + .count() >= 4 + }; + + // Wait for everyone to see all channels and node announcements. 
+ while node_sender.network_graph().list_channels().len() < 3 + || node_sender_lsp.network_graph().list_channels().len() < 3 + || node_receiver_lsp.network_graph().list_channels().len() < 3 + || node_receiver.network_graph().list_channels().len() < 3 + || !has_node_announcements(&node_sender) + || !has_node_announcements(&node_sender_lsp) + || !has_node_announcements(&node_receiver_lsp) + || !has_node_announcements(&node_receiver) + { + std::thread::sleep(std::time::Duration::from_millis(100)); + } + + let recipient_id = vec![1, 2, 3]; + let blinded_paths = + node_receiver_lsp.bolt12_payment().blinded_paths_for_async_recipient(recipient_id).unwrap(); + node_receiver.bolt12_payment().set_paths_to_static_invoice_server(blinded_paths).unwrap(); + + let offer = loop { + if let Ok(offer) = node_receiver.bolt12_payment().receive_async() { + break offer; + } + + std::thread::sleep(std::time::Duration::from_millis(100)); + }; + + let payment_id = + node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None).unwrap(); + + expect_payment_successful_event!(node_sender, Some(payment_id), None); +} + #[test] fn test_node_announcement_propagation() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); From 006a06e157ef6f9d80584b9be33bfd5b8a47cf02 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Sep 2025 10:19:33 +0200 Subject: [PATCH 074/184] Adapt channel balance reporting to use confirmed candidate With splicing now implemented, a channel may have multiple holder commitment transactions and corresponding balance candidates. ldk-node now reports the confirmed balance candidate rather than a single static balance, ensuring the exposed value matches the channel's onchain state. Other candidate balances remain internal for now. 
--- Cargo.toml | 24 ++++++++++++------------ src/balance.rs | 30 ++++++++++++++++++------------ 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9010ad6d5..c2b7775ac 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,17 +52,17 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"4e32d85249359d8ef8ece97d89848e40154363ab" } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } #lightning = { path = "../rust-lightning/lightning", features = ["std"] } #lightning-types = { path = "../rust-lightning/lightning-types" } @@ -109,7 +109,7 @@ winapi = { 
version = "0.3", features = ["winbase"] } [dev-dependencies] #lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "4e32d85249359d8ef8ece97d89848e40154363ab", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/src/balance.rs b/src/balance.rs index d0ebc310b..7ba4826a9 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -73,7 +73,8 @@ pub struct BalanceDetails { pub enum LightningBalance { /// The channel is not yet closed (or the commitment or closing transaction has not yet /// appeared in a block). The given balance is claimable (less on-chain fees) if the channel is - /// force-closed now. + /// force-closed now. Values do not take into account any pending splices and are only based + /// on the confirmed state of the channel. ClaimableOnChannelClose { /// The identifier of the channel this balance belongs to. 
channel_id: ChannelId, @@ -224,21 +225,26 @@ impl LightningBalance { ) -> Self { match balance { LdkBalance::ClaimableOnChannelClose { - amount_satoshis, - transaction_fee_satoshis, - outbound_payment_htlc_rounded_msat, - outbound_forwarded_htlc_rounded_msat, - inbound_claiming_htlc_rounded_msat, - inbound_htlc_rounded_msat, - } => Self::ClaimableOnChannelClose { - channel_id, - counterparty_node_id, - amount_satoshis, - transaction_fee_satoshis, + balance_candidates, + confirmed_balance_candidate_index, outbound_payment_htlc_rounded_msat, outbound_forwarded_htlc_rounded_msat, inbound_claiming_htlc_rounded_msat, inbound_htlc_rounded_msat, + } => { + // unwrap safety: confirmed_balance_candidate_index is guaranteed to index into balance_candidates + let balance = balance_candidates.get(confirmed_balance_candidate_index).unwrap(); + + Self::ClaimableOnChannelClose { + channel_id, + counterparty_node_id, + amount_satoshis: balance.amount_satoshis, + transaction_fee_satoshis: balance.transaction_fee_satoshis, + outbound_payment_htlc_rounded_msat, + outbound_forwarded_htlc_rounded_msat, + inbound_claiming_htlc_rounded_msat, + inbound_htlc_rounded_msat, + } }, LdkBalance::ClaimableAwaitingConfirmations { amount_satoshis, From c99ff305edc59c256d3315bcc0327d048b68d000 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Sep 2025 13:34:46 +0200 Subject: [PATCH 075/184] Log to console with node prefix --- tests/common/logging.rs | 27 ++++++++++++++++++++++++++- tests/integration_tests_rust.rs | 17 ++++++++++++++--- 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/tests/common/logging.rs b/tests/common/logging.rs index 6bceac29a..d7d59ba32 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -1,5 +1,4 @@ use chrono::Utc; -#[cfg(not(feature = "uniffi"))] use ldk_node::logger::LogRecord; use ldk_node::logger::{LogLevel, LogWriter}; #[cfg(not(feature = "uniffi"))] @@ -143,3 +142,29 @@ pub(crate) fn validate_log_entry(entry: &String) { let msg = 
&path_and_msg[msg_start_index..]; assert!(!msg.is_empty()); } + +pub(crate) struct MultiNodeLogger { + node_id: String, +} + +impl MultiNodeLogger { + pub(crate) fn new(node_id: String) -> Self { + Self { node_id } + } +} + +impl LogWriter for MultiNodeLogger { + fn log(&self, record: LogRecord) { + let log = format!( + "[{}] {} {:<5} [{}:{}] {}\n", + self.node_id, + Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), + record.level.to_string(), + record.module_path, + record.line, + record.args + ); + + print!("{}", log); + } +} diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 77f46091d..c9f2f95fc 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -12,6 +12,7 @@ use common::{ expect_channel_pending_event, expect_channel_ready_event, expect_event, expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, + logging::MultiNodeLogger, logging::{init_log_logger, validate_log_entry, TestLogWriter}, open_channel, premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, @@ -1135,17 +1136,27 @@ fn static_invoice_server() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); - let config_sender = random_config(true); + let mut config_sender = random_config(true); + config_sender.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender ".to_string()))); let node_sender = setup_node(&chain_source, config_sender, None); - let config_sender_lsp = random_config(true); + let mut config_sender_lsp = random_config(true); + config_sender_lsp.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender_lsp ".to_string()))); let node_sender_lsp = setup_node(&chain_source, config_sender_lsp, None); let mut config_receiver_lsp = random_config(true); 
config_receiver_lsp.node_config.async_payment_services_enabled = true; + config_receiver_lsp.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver_lsp".to_string()))); + let node_receiver_lsp = setup_node(&chain_source, config_receiver_lsp, None); - let config_receiver = random_config(true); + let mut config_receiver = random_config(true); + config_receiver.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver ".to_string()))); + let node_receiver = setup_node(&chain_source, config_receiver, None); let address_sender = node_sender.onchain_payment().new_address().unwrap(); From 8d18d1655e92c25e1b0e772a93cf63bf1c0575fa Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Sep 2025 11:28:41 +0200 Subject: [PATCH 076/184] Update static invoice store for invoice requests With the merge of https://github.com/lightningdevkit/rust-lightning/pull/4049, it is now possible for a static invoice server to forward the invoice request to the recipient if they are online. 
--- Cargo.toml | 24 ++++----- src/event.rs | 26 +++++++--- .../asynchronous/static_invoice_store.rs | 50 ++++++++++++++----- 3 files changed, 70 insertions(+), 30 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c2b7775ac..f3038ee96 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,17 +52,17 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"84398d9e5b3dc61c0a5c71972aa944f19948aef0" } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } #lightning = { path = "../rust-lightning/lightning", features = ["std"] } #lightning-types = { path = "../rust-lightning/lightning-types" } @@ -109,7 +109,7 @@ winapi = { 
version = "0.3", features = ["winbase"] } [dev-dependencies] #lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "84398d9e5b3dc61c0a5c71972aa944f19948aef0", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/src/event.rs b/src/event.rs index 7a6dc4832..cd9146379 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1500,13 +1500,19 @@ where LdkEvent::PersistStaticInvoice { invoice, + invoice_request_path, invoice_slot, recipient_id, invoice_persisted_path, } => { if let Some(store) = self.static_invoice_store.as_ref() { match store - .handle_persist_static_invoice(invoice, invoice_slot, recipient_id) + .handle_persist_static_invoice( + invoice, + invoice_request_path, + invoice_slot, + recipient_id, + ) .await { Ok(_) => { @@ -1519,16 +1525,24 @@ where }; } }, - LdkEvent::StaticInvoiceRequested { recipient_id, invoice_slot, reply_path } => { + LdkEvent::StaticInvoiceRequested { + recipient_id, + invoice_slot, + reply_path, + invoice_request, + } => { if let Some(store) = self.static_invoice_store.as_ref() { let invoice = store.handle_static_invoice_requested(&recipient_id, invoice_slot).await; match invoice { - Ok(Some(invoice)) => { - if let Err(e) = - self.channel_manager.send_static_invoice(invoice, reply_path) - { + Ok(Some((invoice, invoice_request_path))) => { + if let Err(e) = self.channel_manager.respond_to_static_invoice_request( + invoice, + reply_path, + invoice_request, + invoice_request_path, + ) { log_error!(self.logger, "Failed to send static invoice: {:?}", 
e); } }, diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index eed6720e5..f1aa702a4 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -15,11 +15,23 @@ use crate::types::DynStore; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; -use lightning::{offers::static_invoice::StaticInvoice, util::ser::Writeable}; +use lightning::blinded_path::message::BlindedMessagePath; +use lightning::impl_writeable_tlv_based; +use lightning::{offers::static_invoice::StaticInvoice, util::ser::Readable, util::ser::Writeable}; use std::sync::{Arc, Mutex}; use std::time::Duration; +struct PersistedStaticInvoice { + invoice: StaticInvoice, + request_path: BlindedMessagePath, +} + +impl_writeable_tlv_based!(PersistedStaticInvoice, { + (0, invoice, required), + (2, request_path, required) +}); + pub(crate) struct StaticInvoiceStore { kv_store: Arc, request_rate_limiter: Mutex, @@ -60,7 +72,7 @@ impl StaticInvoiceStore { pub(crate) async fn handle_static_invoice_requested( &self, recipient_id: &[u8], invoice_slot: u16, - ) -> Result, lightning::io::Error> { + ) -> Result, lightning::io::Error> { Self::check_rate_limit(&self.request_rate_limiter, &recipient_id)?; let (secondary_namespace, key) = Self::get_storage_location(invoice_slot, recipient_id); @@ -68,12 +80,16 @@ impl StaticInvoiceStore { self.kv_store .read(STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, &secondary_namespace, &key) .and_then(|data| { - data.try_into().map(Some).map_err(|e| { - lightning::io::Error::new( - lightning::io::ErrorKind::InvalidData, - format!("Failed to parse static invoice: {:?}", e), - ) - }) + PersistedStaticInvoice::read(&mut &*data) + .map(|persisted_invoice| { + Some((persisted_invoice.invoice, persisted_invoice.request_path)) + }) + .map_err(|e| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + format!("Failed to parse static 
invoice: {:?}", e), + ) + }) }) .or_else( |e| { @@ -87,14 +103,18 @@ impl StaticInvoiceStore { } pub(crate) async fn handle_persist_static_invoice( - &self, invoice: StaticInvoice, invoice_slot: u16, recipient_id: Vec, + &self, invoice: StaticInvoice, invoice_request_path: BlindedMessagePath, invoice_slot: u16, + recipient_id: Vec, ) -> Result<(), lightning::io::Error> { Self::check_rate_limit(&self.persist_rate_limiter, &recipient_id)?; let (secondary_namespace, key) = Self::get_storage_location(invoice_slot, &recipient_id); + let persisted_invoice = + PersistedStaticInvoice { invoice, request_path: invoice_request_path }; + let mut buf = Vec::new(); - invoice.write(&mut buf)?; + persisted_invoice.write(&mut buf)?; // Static invoices will be persisted at "static_invoices//". // @@ -144,15 +164,21 @@ mod tests { let static_invoice = invoice(); let recipient_id = vec![1, 1, 1]; + let invoice_request_path = blinded_path(); assert!(static_invoice_store - .handle_persist_static_invoice(static_invoice.clone(), 0, recipient_id.clone()) + .handle_persist_static_invoice( + static_invoice.clone(), + invoice_request_path.clone(), + 0, + recipient_id.clone() + ) .await .is_ok()); let requested_invoice = static_invoice_store.handle_static_invoice_requested(&recipient_id, 0).await.unwrap(); - assert_eq!(requested_invoice.unwrap(), static_invoice); + assert_eq!(requested_invoice.unwrap(), (static_invoice, invoice_request_path)); assert!(static_invoice_store .handle_static_invoice_requested(&recipient_id, 1) From efbef4c4d57a4059eab7558e8127698c0bc4299c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 17 Sep 2025 14:14:48 +0200 Subject: [PATCH 077/184] Update static invoice test to use unannounced channels To better align with the expected real life setup. 
--- src/builder.rs | 4 ++++ tests/common/mod.rs | 11 +++++++++-- tests/integration_tests_rust.rs | 33 ++++++++++++++++++++++----------- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index d330597ee..b99c44cec 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1378,6 +1378,10 @@ fn build_with_store_internal( 100; } + if config.async_payment_services_enabled { + user_config.accept_forwards_to_priv_channels = true; + } + let message_router = Arc::new(MessageRouter::new(Arc::clone(&network_graph), Arc::clone(&keys_manager))); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index f5bfe76fc..70c9a43a8 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -589,6 +589,13 @@ pub(crate) fn bump_fee_and_broadcast( pub fn open_channel( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, should_announce: bool, electrsd: &ElectrsD, +) -> OutPoint { + open_channel_push_amt(node_a, node_b, funding_amount_sat, None, should_announce, electrsd) +} + +pub fn open_channel_push_amt( + node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, push_amount_msat: Option, + should_announce: bool, electrsd: &ElectrsD, ) -> OutPoint { if should_announce { node_a @@ -596,7 +603,7 @@ pub fn open_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, - None, + push_amount_msat, None, ) .unwrap(); @@ -606,7 +613,7 @@ pub fn open_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, - None, + push_amount_msat, None, ) .unwrap(); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index c9f2f95fc..f2e8407cd 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -14,9 +14,9 @@ use common::{ generate_blocks_and_wait, logging::MultiNodeLogger, logging::{init_log_logger, validate_log_entry, TestLogWriter}, - open_channel, premine_and_distribute_funds, 
premine_blocks, prepare_rbf, random_config, - random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, - setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, + open_channel, open_channel_push_amt, premine_and_distribute_funds, premine_blocks, prepare_rbf, + random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, + setup_node, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, }; use ldk_node::config::EsploraSyncConfig; @@ -1137,11 +1137,14 @@ fn static_invoice_server() { let chain_source = TestChainSource::Esplora(&electrsd); let mut config_sender = random_config(true); + config_sender.node_config.listening_addresses = None; + config_sender.node_config.node_alias = None; config_sender.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender ".to_string()))); let node_sender = setup_node(&chain_source, config_sender, None); let mut config_sender_lsp = random_config(true); + config_sender_lsp.node_config.async_payment_services_enabled = true; config_sender_lsp.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender_lsp ".to_string()))); let node_sender_lsp = setup_node(&chain_source, config_sender_lsp, None); @@ -1154,9 +1157,10 @@ fn static_invoice_server() { let node_receiver_lsp = setup_node(&chain_source, config_receiver_lsp, None); let mut config_receiver = random_config(true); + config_receiver.node_config.listening_addresses = None; + config_receiver.node_config.node_alias = None; config_receiver.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver ".to_string()))); - let node_receiver = setup_node(&chain_source, config_receiver, None); let address_sender = node_sender.onchain_payment().new_address().unwrap(); @@ -1176,9 +1180,16 @@ fn static_invoice_server() { node_receiver_lsp.sync_wallets().unwrap(); node_receiver.sync_wallets().unwrap(); - open_channel(&node_sender, &node_sender_lsp, 400_000, true, &electrsd); + 
open_channel(&node_sender, &node_sender_lsp, 400_000, false, &electrsd); open_channel(&node_sender_lsp, &node_receiver_lsp, 400_000, true, &electrsd); - open_channel(&node_receiver_lsp, &node_receiver, 400_000, true, &electrsd); + open_channel_push_amt( + &node_receiver, + &node_receiver_lsp, + 400_000, + Some(200_000_000), + false, + &electrsd, + ); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); @@ -1201,14 +1212,14 @@ fn static_invoice_server() { .filter(|n| { node.network_graph().node(n).map_or(false, |info| info.announcement_info.is_some()) }) - .count() >= 4 + .count() >= 2 }; // Wait for everyone to see all channels and node announcements. - while node_sender.network_graph().list_channels().len() < 3 - || node_sender_lsp.network_graph().list_channels().len() < 3 - || node_receiver_lsp.network_graph().list_channels().len() < 3 - || node_receiver.network_graph().list_channels().len() < 3 + while node_sender.network_graph().list_channels().len() < 1 + || node_sender_lsp.network_graph().list_channels().len() < 1 + || node_receiver_lsp.network_graph().list_channels().len() < 1 + || node_receiver.network_graph().list_channels().len() < 1 || !has_node_announcements(&node_sender) || !has_node_announcements(&node_sender_lsp) || !has_node_announcements(&node_receiver_lsp) From 3df14770480c3fadb62b34dc57d23ee5b9b150df Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Fri, 19 Sep 2025 14:42:21 +0200 Subject: [PATCH 078/184] Fix wait_for_tx exponential backoff Backoff wasn't actually working and polling would happen without any delay at all. 
--- tests/common/mod.rs | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 70c9a43a8..0a1e8cbd2 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -437,32 +437,31 @@ pub(crate) fn wait_for_block(electrs: &E, min_height: usize) { } pub(crate) fn wait_for_tx(electrs: &E, txid: Txid) { - let mut tx_res = electrs.transaction_get(&txid); - loop { - if tx_res.is_ok() { - break; - } - tx_res = exponential_backoff_poll(|| { - electrs.ping().unwrap(); - Some(electrs.transaction_get(&txid)) - }); + if electrs.transaction_get(&txid).is_ok() { + return; } + + exponential_backoff_poll(|| { + electrs.ping().unwrap(); + electrs.transaction_get(&txid).ok() + }); } pub(crate) fn wait_for_outpoint_spend(electrs: &E, outpoint: OutPoint) { let tx = electrs.transaction_get(&outpoint.txid).unwrap(); let txout_script = tx.output.get(outpoint.vout as usize).unwrap().clone().script_pubkey; - let mut is_spent = !electrs.script_get_history(&txout_script).unwrap().is_empty(); - loop { - if is_spent { - break; - } - is_spent = exponential_backoff_poll(|| { - electrs.ping().unwrap(); - Some(!electrs.script_get_history(&txout_script).unwrap().is_empty()) - }); + let is_spent = !electrs.script_get_history(&txout_script).unwrap().is_empty(); + if is_spent { + return; } + + exponential_backoff_poll(|| { + electrs.ping().unwrap(); + + let is_spent = !electrs.script_get_history(&txout_script).unwrap().is_empty(); + is_spent.then_some(()) + }); } pub(crate) fn exponential_backoff_poll(mut poll: F) -> T From 97f404f4d895e9f06268facbbe617a40a6358455 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 18 Sep 2025 20:34:20 +0200 Subject: [PATCH 079/184] Adapt to new pay_for_offer call in upstream LDK Updated `pay_for_offer` call with `OptionalOfferPaymentParams` and delegate to `pay_for_offer_with_quantity` when needed. 
--- Cargo.toml | 24 ++++++++++++------------ src/payment/bolt12.rs | 43 ++++++++++++++++++++++++++++--------------- 2 files changed, 40 insertions(+), 27 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f3038ee96..1d3f45bfa 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,17 +52,17 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } #lightning = { path = "../rust-lightning/lightning", features = ["std"] } #lightning-types = { path = "../rust-lightning/lightning-types" } @@ -109,7 +109,7 @@ winapi = { 
version = "0.3", features = ["winbase"] } [dev-dependencies] #lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b002e43ec5f9c1cbdcd1ac8588402c5a65ecd2e4", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 81349e2bd..601c03d7d 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -17,7 +17,7 @@ use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, Payme use crate::types::{ChannelManager, PaymentStore}; use lightning::blinded_path::message::BlindedMessagePath; -use lightning::ln::channelmanager::{PaymentId, Retry}; +use lightning::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, Retry}; use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::routing::router::RouteParametersConfig; @@ -102,15 +102,19 @@ impl Bolt12Payment { }, }; - match self.channel_manager.pay_for_offer( - &offer, - quantity, - None, - payer_note.clone(), - payment_id, + let params = OptionalOfferPaymentParams { + payer_note: payer_note.clone(), retry_strategy, route_params_config, - ) { + }; + let res = if let Some(quantity) = quantity { + self.channel_manager + .pay_for_offer_with_quantity(&offer, None, payment_id, params, quantity) + } else { + self.channel_manager.pay_for_offer(&offer, None, payment_id, params) + }; + + match res { Ok(()) => { let payee_pubkey = offer.issuer_signing_pubkey(); log_info!( @@ -209,15 +213,24 @@ 
impl Bolt12Payment { return Err(Error::InvalidAmount); } - match self.channel_manager.pay_for_offer( - &offer, - quantity, - Some(amount_msat), - payer_note.clone(), - payment_id, + let params = OptionalOfferPaymentParams { + payer_note: payer_note.clone(), retry_strategy, route_params_config, - ) { + }; + let res = if let Some(quantity) = quantity { + self.channel_manager.pay_for_offer_with_quantity( + &offer, + Some(amount_msat), + payment_id, + params, + quantity, + ) + } else { + self.channel_manager.pay_for_offer(&offer, Some(amount_msat), payment_id, params) + }; + + match res { Ok(()) => { let payee_pubkey = offer.issuer_signing_pubkey(); log_info!( From 904a05f7eda483e4122a4191620c03d6441f59e4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 22 Sep 2025 13:11:23 +0200 Subject: [PATCH 080/184] Try to log status code for `reqwest`'s `Request` error kind We attempt to log a status code when `reqwest` returns a `Request` error kind. It might not be the case that the status code would always/ever be set for this error kind. 
--- src/chain/esplora.rs | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 8e9a4dbd4..2226358c1 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -144,12 +144,22 @@ impl EsploraChainSource { }, Err(e) => match *e { esplora_client::Error::Reqwest(he) => { - log_error!( - self.logger, - "{} of on-chain wallet failed due to HTTP connection error: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - he - ); + if let Some(status_code) = he.status() { + log_error!( + self.logger, + "{} of on-chain wallet failed due to HTTP {} error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + status_code, + he, + ); + } else { + log_error!( + self.logger, + "{} of on-chain wallet failed due to HTTP error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + he, + ); + } Err(Error::WalletOperationFailed) }, _ => { From 1192085185eb8bc8b2981c102b596e416276322c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 25 Sep 2025 09:53:59 +0200 Subject: [PATCH 081/184] Bump LDK and account for `FutureSpawner` move The `FutureSpawner` trait moved to `lightning::util::native_async` now. 
--- Cargo.toml | 24 ++++++++++++------------ src/gossip.rs | 4 +++- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1d3f45bfa..b639b7dc1 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,17 +52,17 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } -lightning-macros = { 
git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } #lightning = { path = "../rust-lightning/lightning", features = ["std"] } #lightning-types = { path = "../rust-lightning/lightning-types" } @@ -109,7 +109,7 @@ winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] 
#lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "50391d3a3efa7a8f32d119d126a633e4b1981ee6", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/src/gossip.rs b/src/gossip.rs index 258f9f736..efaf3ce89 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -12,7 +12,9 @@ use crate::runtime::Runtime; use crate::types::{GossipSync, Graph, P2PGossipSync, PeerManager, RapidGossipSync, UtxoLookup}; use crate::Error; -use lightning_block_sync::gossip::{FutureSpawner, GossipVerifier}; +use lightning_block_sync::gossip::GossipVerifier; + +use lightning::util::native_async::FutureSpawner; use std::future::Future; use std::sync::atomic::{AtomicU32, Ordering}; From b7598ae7d947a6593f01649a08e29b0d78253b4e Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 4 Sep 2025 16:20:50 +0200 Subject: [PATCH 082/184] Add onion mailbox for async receivers This introduces an in-memory mailbox to hold onion messages until the receiver comes online. This is required for async payment `held_htlc_available` messages. The mailbox is bounded by a maximum number of peers and a maximum number of messages per peer. 
--- bindings/ldk_node.udl | 9 ++- src/builder.rs | 101 ++++++++++++++++++++----- src/config.rs | 16 +++- src/event.rs | 40 ++++++++-- src/lib.rs | 16 ++-- src/payment/asynchronous/mod.rs | 1 + src/payment/asynchronous/om_mailbox.rs | 99 ++++++++++++++++++++++++ src/payment/bolt12.rs | 16 ++-- tests/common/mod.rs | 11 ++- tests/integration_tests_rust.rs | 37 +++++++-- 10 files changed, 298 insertions(+), 48 deletions(-) create mode 100644 src/payment/asynchronous/om_mailbox.rs diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 9f0ef697e..a6d867e5a 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -13,7 +13,6 @@ dictionary Config { u64 probing_liquidity_limit_multiplier; AnchorChannelsConfig? anchor_channels_config; RouteParametersConfig? route_parameters; - boolean async_payment_services_enabled; }; dictionary AnchorChannelsConfig { @@ -96,6 +95,8 @@ interface Builder { [Throws=BuildError] void set_node_alias(string node_alias); [Throws=BuildError] + void set_async_payments_role(AsyncPaymentsRole? 
role); + [Throws=BuildError] Node build(); [Throws=BuildError] Node build_with_fs_store(); @@ -356,6 +357,7 @@ enum BuildError { "WalletSetupFailed", "LoggerSetupFailed", "NetworkMismatch", + "AsyncPaymentsConfigMismatch", }; [Trait] @@ -720,6 +722,11 @@ enum Currency { "Signet", }; +enum AsyncPaymentsRole { + "Client", + "Server", +}; + dictionary RouteHintHop { PublicKey src_node_id; u64 short_channel_id; diff --git a/src/builder.rs b/src/builder.rs index b99c44cec..7bca0c2c6 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -7,9 +7,9 @@ use crate::chain::ChainSource; use crate::config::{ - default_user_config, may_announce_channel, AnnounceError, BitcoindRestClientConfig, Config, - ElectrumSyncConfig, EsploraSyncConfig, DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, - DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, + default_user_config, may_announce_channel, AnnounceError, AsyncPaymentsRole, + BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, + DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, }; use crate::connection::ConnectionManager; @@ -27,6 +27,7 @@ use crate::liquidity::{ }; use crate::logger::{log_error, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; +use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; use crate::peer_store::PeerStore; use crate::runtime::Runtime; use crate::tx_broadcaster::TransactionBroadcaster; @@ -191,6 +192,8 @@ pub enum BuildError { LoggerSetupFailed, /// The given network does not match the node's previously configured network. NetworkMismatch, + /// The role of the node in an asynchronous payments context is not compatible with the current configuration. 
+ AsyncPaymentsConfigMismatch, } impl fmt::Display for BuildError { @@ -219,6 +222,12 @@ impl fmt::Display for BuildError { Self::NetworkMismatch => { write!(f, "Given network does not match the node's previously configured network.") }, + Self::AsyncPaymentsConfigMismatch => { + write!( + f, + "The async payments role is not compatible with the current configuration." + ) + }, } } } @@ -240,6 +249,7 @@ pub struct NodeBuilder { gossip_source_config: Option, liquidity_source_config: Option, log_writer_config: Option, + async_payments_role: Option, runtime_handle: Option, } @@ -266,6 +276,7 @@ impl NodeBuilder { liquidity_source_config, log_writer_config, runtime_handle, + async_payments_role: None, } } @@ -544,6 +555,21 @@ impl NodeBuilder { Ok(self) } + /// Sets the role of the node in an asynchronous payments context. + /// + /// See for more information about the async payments protocol. + pub fn set_async_payments_role( + &mut self, role: Option, + ) -> Result<&mut Self, BuildError> { + if let Some(AsyncPaymentsRole::Server) = role { + may_announce_channel(&self.config) + .map_err(|_| BuildError::AsyncPaymentsConfigMismatch)?; + } + + self.async_payments_role = role; + Ok(self) + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self) -> Result { @@ -700,6 +726,7 @@ impl NodeBuilder { self.chain_data_source_config.as_ref(), self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), + self.async_payments_role, seed_bytes, runtime, logger, @@ -732,6 +759,7 @@ impl NodeBuilder { self.chain_data_source_config.as_ref(), self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), + self.async_payments_role, seed_bytes, runtime, logger, @@ -989,6 +1017,13 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ()) } + /// Sets the role of the node in an asynchronous payments context. 
+ pub fn set_async_payments_role( + &self, role: Option, + ) -> Result<(), BuildError> { + self.inner.write().unwrap().set_async_payments_role(role).map(|_| ()) + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self) -> Result, BuildError> { @@ -1082,8 +1117,9 @@ impl ArcedNodeBuilder { fn build_with_store_internal( config: Arc, chain_data_source_config: Option<&ChainDataSourceConfig>, gossip_source_config: Option<&GossipSourceConfig>, - liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64], - runtime: Arc, logger: Arc, kv_store: Arc, + liquidity_source_config: Option<&LiquiditySourceConfig>, + async_payments_role: Option, seed_bytes: [u8; 64], runtime: Arc, + logger: Arc, kv_store: Arc, ) -> Result { optionally_install_rustls_cryptoprovider(); @@ -1378,8 +1414,14 @@ fn build_with_store_internal( 100; } - if config.async_payment_services_enabled { - user_config.accept_forwards_to_priv_channels = true; + if let Some(role) = async_payments_role { + match role { + AsyncPaymentsRole::Server => { + user_config.accept_forwards_to_priv_channels = true; + user_config.enable_htlc_hold = true; + }, + AsyncPaymentsRole::Client => user_config.hold_outbound_htlcs_at_next_hop = true, + } } let message_router = @@ -1452,17 +1494,32 @@ fn build_with_store_internal( } // Initialize the PeerManager - let onion_messenger: Arc = Arc::new(OnionMessenger::new( - Arc::clone(&keys_manager), - Arc::clone(&keys_manager), - Arc::clone(&logger), - Arc::clone(&channel_manager), - message_router, - Arc::clone(&channel_manager), - Arc::clone(&channel_manager), - IgnoringMessageHandler {}, - IgnoringMessageHandler {}, - )); + let onion_messenger: Arc = + if let Some(AsyncPaymentsRole::Server) = async_payments_role { + Arc::new(OnionMessenger::new_with_offline_peer_interception( + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&logger), + 
Arc::clone(&channel_manager), + message_router, + Arc::clone(&channel_manager), + Arc::clone(&channel_manager), + IgnoringMessageHandler {}, + IgnoringMessageHandler {}, + )) + } else { + Arc::new(OnionMessenger::new( + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&logger), + Arc::clone(&channel_manager), + message_router, + Arc::clone(&channel_manager), + Arc::clone(&channel_manager), + IgnoringMessageHandler {}, + IgnoringMessageHandler {}, + )) + }; let ephemeral_bytes: [u8; 32] = keys_manager.get_secure_random_bytes(); // Initialize the GossipSource @@ -1649,6 +1706,12 @@ fn build_with_store_internal( }, }; + let om_mailbox = if let Some(AsyncPaymentsRole::Server) = async_payments_role { + Some(Arc::new(OnionMessageMailbox::new())) + } else { + None + }; + let (stop_sender, _) = tokio::sync::watch::channel(()); let (background_processor_stop_sender, _) = tokio::sync::watch::channel(()); let is_running = Arc::new(RwLock::new(false)); @@ -1681,6 +1744,8 @@ fn build_with_store_internal( is_running, is_listening, node_metrics, + om_mailbox, + async_payments_role, }) } diff --git a/src/config.rs b/src/config.rs index bb0bd56ba..88b70815d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -179,8 +179,6 @@ pub struct Config { /// **Note:** If unset, default parameters will be used, and you will be able to override the /// parameters on a per-payment basis in the corresponding method calls. pub route_parameters: Option, - /// Whether to enable the static invoice service to support async payment reception for clients. - pub async_payment_services_enabled: bool, } impl Default for Config { @@ -195,7 +193,6 @@ impl Default for Config { anchor_channels_config: Some(AnchorChannelsConfig::default()), route_parameters: None, node_alias: None, - async_payment_services_enabled: false, } } } @@ -537,6 +534,19 @@ impl From for LdkMaxDustHTLCExposure { } } +#[derive(Debug, Clone, Copy)] +/// The role of the node in an asynchronous payments context. 
+/// +/// See for more information about the async payments protocol. +pub enum AsyncPaymentsRole { + /// Node acts as a client in an async payments context. This means that if possible, it will instruct its peers to hold + /// HTLCs for it, so that it can go offline. + Client, + /// Node acts as a server in an async payments context. This means that it will hold async payments HTLCs and onion + /// messages for its peers. + Server, +} + #[cfg(test)] mod tests { use std::str::FromStr; diff --git a/src/event.rs b/src/event.rs index cd9146379..1d1acfafa 100644 --- a/src/event.rs +++ b/src/event.rs @@ -5,7 +5,8 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::types::{CustomTlvRecord, DynStore, PaymentStore, Sweeper, Wallet}; +use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; +use crate::types::{CustomTlvRecord, DynStore, OnionMessenger, PaymentStore, Sweeper, Wallet}; use crate::{ hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, UserChannelId, }; @@ -459,6 +460,8 @@ where logger: L, config: Arc, static_invoice_store: Option, + onion_messenger: Arc, + om_mailbox: Option>, } impl EventHandler @@ -472,7 +475,8 @@ where output_sweeper: Arc, network_graph: Arc, liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>, - static_invoice_store: Option, runtime: Arc, logger: L, + static_invoice_store: Option, onion_messenger: Arc, + om_mailbox: Option>, runtime: Arc, logger: L, config: Arc, ) -> Self { Self { @@ -490,6 +494,8 @@ where runtime, config, static_invoice_store, + onion_messenger, + om_mailbox, } } @@ -1491,11 +1497,33 @@ where self.bump_tx_event_handler.handle_event(&bte).await; }, - LdkEvent::OnionMessageIntercepted { ..
} => { - debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + LdkEvent::OnionMessageIntercepted { peer_node_id, message } => { + if let Some(om_mailbox) = self.om_mailbox.as_ref() { + om_mailbox.onion_message_intercepted(peer_node_id, message); + } else { + log_trace!( + self.logger, + "Onion message intercepted, but no onion message mailbox available" + ); + } }, - LdkEvent::OnionMessagePeerConnected { .. } => { - debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + LdkEvent::OnionMessagePeerConnected { peer_node_id } => { + if let Some(om_mailbox) = self.om_mailbox.as_ref() { + let messages = om_mailbox.onion_message_peer_connected(peer_node_id); + + for message in messages { + if let Err(e) = + self.onion_messenger.forward_onion_message(message, &peer_node_id) + { + log_trace!( + self.logger, + "Failed to forward onion message to peer {}: {:?}", + peer_node_id, + e + ); + } + } + } }, LdkEvent::PersistStaticInvoice { diff --git a/src/lib.rs b/src/lib.rs index e7e27273b..046343231 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -127,8 +127,8 @@ pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, ChannelConfig, Config, NODE_ANN_BCAST_INTERVAL, - PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, + default_user_config, may_announce_channel, AsyncPaymentsRole, ChannelConfig, Config, + NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; @@ -136,6 +136,7 @@ use gossip::GossipSource; use graph::NetworkGraph; use io::utils::write_node_metrics; use liquidity::{LSPS1Liquidity, LiquiditySource}; +use payment::asynchronous::om_mailbox::OnionMessageMailbox; use payment::asynchronous::static_invoice_store::StaticInvoiceStore; use payment::{ Bolt11Payment, Bolt12Payment, 
OnchainPayment, PaymentDetails, SpontaneousPayment, @@ -205,6 +206,8 @@ pub struct Node { is_running: Arc>, is_listening: Arc, node_metrics: Arc>, + om_mailbox: Option>, + async_payments_role: Option, } impl Node { @@ -499,7 +502,8 @@ impl Node { Arc::clone(&self.logger), )); - let static_invoice_store = if self.config.async_payment_services_enabled { + let static_invoice_store = if let Some(AsyncPaymentsRole::Server) = self.async_payments_role + { Some(StaticInvoiceStore::new(Arc::clone(&self.kv_store))) } else { None @@ -517,6 +521,8 @@ impl Node { Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), static_invoice_store, + Arc::clone(&self.onion_messenger), + self.om_mailbox.clone(), Arc::clone(&self.runtime), Arc::clone(&self.logger), Arc::clone(&self.config), @@ -826,9 +832,9 @@ impl Node { Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), - Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), + self.async_payments_role, ) } @@ -840,9 +846,9 @@ impl Node { Arc::new(Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), - Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), + self.async_payments_role, )) } diff --git a/src/payment/asynchronous/mod.rs b/src/payment/asynchronous/mod.rs index ebb7a4bd3..c28f6e243 100644 --- a/src/payment/asynchronous/mod.rs +++ b/src/payment/asynchronous/mod.rs @@ -5,5 +5,6 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+pub(crate) mod om_mailbox; mod rate_limiter; pub(crate) mod static_invoice_store; diff --git a/src/payment/asynchronous/om_mailbox.rs b/src/payment/asynchronous/om_mailbox.rs new file mode 100644 index 000000000..9a7478706 --- /dev/null +++ b/src/payment/asynchronous/om_mailbox.rs @@ -0,0 +1,99 @@ +use std::collections::{HashMap, VecDeque}; +use std::sync::Mutex; + +use bitcoin::secp256k1::PublicKey; +use lightning::ln::msgs::OnionMessage; + +pub(crate) struct OnionMessageMailbox { + map: Mutex>>, +} + +impl OnionMessageMailbox { + const MAX_MESSAGES_PER_PEER: usize = 30; + const MAX_PEERS: usize = 300; + + pub fn new() -> Self { + Self { map: Mutex::new(HashMap::with_capacity(Self::MAX_PEERS)) } + } + + pub(crate) fn onion_message_intercepted(&self, peer_node_id: PublicKey, message: OnionMessage) { + let mut map = self.map.lock().unwrap(); + + let queue = map.entry(peer_node_id).or_insert_with(VecDeque::new); + if queue.len() >= Self::MAX_MESSAGES_PER_PEER { + queue.pop_front(); + } + queue.push_back(message); + + // Enforce a peers limit. If exceeded, evict the peer with the longest queue. 
+ if map.len() > Self::MAX_PEERS { + let peer_to_remove = + map.iter().max_by_key(|(_, queue)| queue.len()).map(|(peer, _)| *peer).unwrap(); + + map.remove(&peer_to_remove); + } + } + + pub(crate) fn onion_message_peer_connected( + &self, peer_node_id: PublicKey, + ) -> Vec { + let mut map = self.map.lock().unwrap(); + + if let Some(queue) = map.remove(&peer_node_id) { + queue.into() + } else { + Vec::new() + } + } + + #[cfg(test)] + pub(crate) fn is_empty(&self) -> bool { + let map = self.map.lock().unwrap(); + map.is_empty() + } +} + +#[cfg(test)] +mod tests { + use bitcoin::key::Secp256k1; + use bitcoin::secp256k1::{PublicKey, SecretKey}; + use lightning::onion_message; + + use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; + + #[test] + fn onion_message_mailbox() { + let mailbox = OnionMessageMailbox::new(); + + let secp = Secp256k1::new(); + let sk_bytes = [12; 32]; + let sk = SecretKey::from_slice(&sk_bytes).unwrap(); + let peer_node_id = PublicKey::from_secret_key(&secp, &sk); + + let blinding_sk = SecretKey::from_slice(&[13; 32]).unwrap(); + let blinding_point = PublicKey::from_secret_key(&secp, &blinding_sk); + + let message_sk = SecretKey::from_slice(&[13; 32]).unwrap(); + let message_point = PublicKey::from_secret_key(&secp, &message_sk); + + let message = lightning::ln::msgs::OnionMessage { + blinding_point, + onion_routing_packet: onion_message::packet::Packet { + version: 0, + public_key: message_point, + hop_data: vec![1, 2, 3], + hmac: [0; 32], + }, + }; + mailbox.onion_message_intercepted(peer_node_id, message.clone()); + + let messages = mailbox.onion_message_peer_connected(peer_node_id); + assert_eq!(messages.len(), 1); + assert_eq!(messages[0], message); + + assert!(mailbox.is_empty()); + + let messages = mailbox.onion_message_peer_connected(peer_node_id); + assert_eq!(messages.len(), 0); + } +} diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 601c03d7d..6cb2f0b85 100644 --- a/src/payment/bolt12.rs +++ 
b/src/payment/bolt12.rs @@ -9,7 +9,7 @@ //! //! [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; +use crate::config::{AsyncPaymentsRole, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; use crate::ffi::{maybe_deref, maybe_wrap}; use crate::logger::{log_error, log_info, LdkLogger, Logger}; @@ -57,16 +57,17 @@ pub struct Bolt12Payment { channel_manager: Arc, payment_store: Arc, is_running: Arc>, - config: Arc, logger: Arc, + async_payments_role: Option, } impl Bolt12Payment { pub(crate) fn new( channel_manager: Arc, payment_store: Arc, - config: Arc, is_running: Arc>, logger: Arc, + is_running: Arc>, logger: Arc, + async_payments_role: Option, ) -> Self { - Self { channel_manager, payment_store, config, is_running, logger } + Self { channel_manager, payment_store, is_running, logger, async_payments_role } } /// Send a payment given an offer. @@ -554,8 +555,11 @@ impl Bolt12Payment { fn blinded_paths_for_async_recipient_internal( &self, recipient_id: Vec, ) -> Result, Error> { - if !self.config.async_payment_services_enabled { - return Err(Error::AsyncPaymentServicesDisabled); + match self.async_payments_role { + Some(AsyncPaymentsRole::Server) => {}, + _ => { + return Err(Error::AsyncPaymentServicesDisabled); + }, } self.channel_manager diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 0a1e8cbd2..aa09b86d0 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -12,7 +12,7 @@ pub(crate) mod logging; use logging::TestLogWriter; -use ldk_node::config::{Config, ElectrumSyncConfig, EsploraSyncConfig}; +use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ @@ -310,6 +310,13 @@ pub(crate) fn setup_two_nodes( pub(crate) fn setup_node( chain_source: &TestChainSource, config: TestConfig, seed_bytes: 
Option>, +) -> TestNode { + setup_node_for_async_payments(chain_source, config, seed_bytes, None) +} + +pub(crate) fn setup_node_for_async_payments( + chain_source: &TestChainSource, config: TestConfig, seed_bytes: Option>, + async_payments_role: Option, ) -> TestNode { setup_builder!(builder, config.node_config); match chain_source { @@ -375,6 +382,8 @@ pub(crate) fn setup_node( } } + builder.set_async_payments_role(async_payments_role).unwrap(); + let test_sync_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); let node = builder.build_with_store(test_sync_store).unwrap(); node.start().unwrap(); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index f2e8407cd..63fc737b3 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -16,10 +16,11 @@ use common::{ logging::{init_log_logger, validate_log_entry, TestLogWriter}, open_channel, open_channel_push_amt, premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, - setup_node, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, + setup_node, setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, + TestSyncStore, }; -use ldk_node::config::EsploraSyncConfig; +use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, @@ -1132,7 +1133,7 @@ fn simple_bolt12_send_receive() { } #[test] -fn static_invoice_server() { +fn async_payment() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1141,20 +1142,33 @@ fn static_invoice_server() { config_sender.node_config.node_alias = None; config_sender.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender ".to_string()))); - let 
node_sender = setup_node(&chain_source, config_sender, None); + let node_sender = setup_node_for_async_payments( + &chain_source, + config_sender, + None, + Some(AsyncPaymentsRole::Client), + ); let mut config_sender_lsp = random_config(true); - config_sender_lsp.node_config.async_payment_services_enabled = true; config_sender_lsp.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender_lsp ".to_string()))); - let node_sender_lsp = setup_node(&chain_source, config_sender_lsp, None); + let node_sender_lsp = setup_node_for_async_payments( + &chain_source, + config_sender_lsp, + None, + Some(AsyncPaymentsRole::Server), + ); let mut config_receiver_lsp = random_config(true); - config_receiver_lsp.node_config.async_payment_services_enabled = true; config_receiver_lsp.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver_lsp".to_string()))); - let node_receiver_lsp = setup_node(&chain_source, config_receiver_lsp, None); + let node_receiver_lsp = setup_node_for_async_payments( + &chain_source, + config_receiver_lsp, + None, + Some(AsyncPaymentsRole::Server), + ); let mut config_receiver = random_config(true); config_receiver.node_config.listening_addresses = None; @@ -1241,9 +1255,16 @@ fn static_invoice_server() { std::thread::sleep(std::time::Duration::from_millis(100)); }; + node_receiver.stop().unwrap(); + let payment_id = node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None).unwrap(); + // Sleep to allow the payment to reach a state where the htlc is held and waiting for the receiver to come online. + std::thread::sleep(std::time::Duration::from_millis(3000)); + + node_receiver.start().unwrap(); + expect_payment_successful_event!(node_sender, Some(payment_id), None); } From 2ae346693d00d6831b92d835d1e47da805d723da Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Sun, 28 Sep 2025 11:03:08 +0200 Subject: [PATCH 083/184] Re-format all imports Nightly `rustfmt` allows one to auto-group imports on the module level.
While we're not quite convinced to switch to the nightly channel for this yet (mostly because not all contributors would have the right nightly version installed on their machines), we here make use of `cargo +nightly fmt` with some additional import grouping options as a one-off. This cleans up our imports for the whole crate and gets us to a consistent state everywhere. --- rustfmt.toml | 2 + src/balance.rs | 9 +- src/builder.rs | 78 ++++++++--------- src/chain/bitcoind.rs | 50 ++++++----- src/chain/electrum.rs | 42 ++++------ src/chain/esplora.rs | 27 +++--- src/chain/mod.rs | 18 ++-- src/config.rs | 23 ++--- src/connection.rs | 15 ++-- src/data_store.rs | 16 ++-- src/error.rs | 4 +- src/event.rs | 80 ++++++++---------- src/fee_estimator.rs | 12 +-- src/ffi/types.rs | 84 ++++++++----------- src/gossip.rs | 17 ++-- src/graph.rs | 8 +- src/io/sqlite_store/migrations.rs | 10 +-- src/io/sqlite_store/mod.rs | 10 +-- src/io/test_utils.rs | 10 +-- src/io/utils.rs | 54 ++++++------ src/io/vss_store.rs | 21 +++-- src/lib.rs | 67 ++++++--------- src/liquidity.rs | 35 ++++---- src/logger.rs | 15 ++-- src/message_handler.rs | 13 ++- src/payment/asynchronous/rate_limiter.rs | 4 +- .../asynchronous/static_invoice_store.rs | 43 +++++----- src/payment/bolt11.rs | 28 +++---- src/payment/bolt12.rs | 20 ++--- src/payment/onchain.rs | 8 +- src/payment/spontaneous.rs | 16 ++-- src/payment/store.rs | 11 ++- src/payment/unified_qr.rs | 27 +++--- src/peer_store.rs | 24 +++--- src/runtime.rs | 12 +-- src/tx_broadcaster.rs | 11 +-- src/types.rs | 34 ++++---- src/wallet/mod.rs | 58 ++++++------- src/wallet/persist.rs | 10 +-- src/wallet/ser.rs | 15 ++-- tests/common/logging.rs | 6 +- tests/common/mod.rs | 39 ++++----- tests/integration_tests_cln.rs | 19 ++--- tests/integration_tests_lnd.rs | 28 +++---- tests/integration_tests_rust.rs | 32 +++---- tests/integration_tests_vss.rs | 3 +- tests/reorg_test.rs | 6 +- 47 files changed, 518 insertions(+), 656 deletions(-) diff --git a/rustfmt.toml 
b/rustfmt.toml index 4f88472be..66161555c 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -10,3 +10,5 @@ match_block_trailing_comma = true # UNSTABLE: format_macro_matchers = true # UNSTABLE: format_strings = true # UNSTABLE: group_imports = "StdExternalCrate" +# UNSTABLE: reorder_imports = true +# UNSTABLE: imports_granularity = "Module" diff --git a/src/balance.rs b/src/balance.rs index 7ba4826a9..d96278dae 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -5,17 +5,14 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use lightning::chain::channelmonitor::Balance as LdkBalance; -use lightning::chain::channelmonitor::BalanceSource; +use bitcoin::secp256k1::PublicKey; +use bitcoin::{Amount, BlockHash, Txid}; +use lightning::chain::channelmonitor::{Balance as LdkBalance, BalanceSource}; use lightning::ln::types::ChannelId; use lightning::sign::SpendableOutputDescriptor; use lightning::util::sweep::{OutputSpendStatus, TrackedSpendableOutput}; - use lightning_types::payment::{PaymentHash, PaymentPreimage}; -use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, BlockHash, Txid}; - /// Details of the known available balances returned by [`Node::list_balances`]. /// /// [`Node::list_balances`]: crate::Node::list_balances diff --git a/src/builder.rs b/src/builder.rs index 7bca0c2c6..cf414ec57 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -5,13 +5,47 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+use std::collections::HashMap; +use std::convert::TryInto; +use std::default::Default; +use std::path::PathBuf; +use std::sync::atomic::AtomicBool; +use std::sync::{Arc, Mutex, Once, RwLock}; +use std::time::SystemTime; +use std::{fmt, fs}; + +use bdk_wallet::template::Bip84; +use bdk_wallet::{KeychainKind, Wallet as BdkWallet}; +use bip39::Mnemonic; +use bitcoin::bip32::{ChildNumber, Xpriv}; +use bitcoin::secp256k1::PublicKey; +use bitcoin::{BlockHash, Network}; +use lightning::chain::{chainmonitor, BestBlock, Watch}; +use lightning::io::Cursor; +use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; +use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; +use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; +use lightning::routing::gossip::NodeAlias; +use lightning::routing::router::DefaultRouter; +use lightning::routing::scoring::{ + ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, +}; +use lightning::sign::{EntropySource, NodeSigner}; +use lightning::util::persist::{ + read_channel_monitors, CHANNEL_MANAGER_PERSISTENCE_KEY, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, +}; +use lightning::util::ser::ReadableArgs; +use lightning::util::sweep::OutputSweeper; +use lightning_persister::fs_store::FilesystemStore; +use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; + use crate::chain::ChainSource; use crate::config::{ default_user_config, may_announce_channel, AnnounceError, AsyncPaymentsRole, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, }; - use crate::connection::ConnectionManager; use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; @@ -39,48 +73,6 @@ use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; use 
crate::{Node, NodeMetrics}; -use lightning::chain::{chainmonitor, BestBlock, Watch}; -use lightning::io::Cursor; -use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; -use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; -use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; -use lightning::routing::gossip::NodeAlias; -use lightning::routing::router::DefaultRouter; -use lightning::routing::scoring::{ - ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, -}; -use lightning::sign::{EntropySource, NodeSigner}; - -use lightning::util::persist::{ - read_channel_monitors, CHANNEL_MANAGER_PERSISTENCE_KEY, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, -}; -use lightning::util::ser::ReadableArgs; -use lightning::util::sweep::OutputSweeper; - -use lightning_persister::fs_store::FilesystemStore; - -use bdk_wallet::template::Bip84; -use bdk_wallet::KeychainKind; -use bdk_wallet::Wallet as BdkWallet; - -use bip39::Mnemonic; - -use bitcoin::secp256k1::PublicKey; -use bitcoin::{BlockHash, Network}; - -use bitcoin::bip32::{ChildNumber, Xpriv}; -use std::collections::HashMap; -use std::convert::TryInto; -use std::default::Default; -use std::fmt; -use std::fs; -use std::path::PathBuf; -use std::sync::atomic::AtomicBool; -use std::sync::{Arc, Mutex, Once, RwLock}; -use std::time::SystemTime; -use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; - const VSS_HARDENED_CHILD_INDEX: u32 = 877; const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138; const LSPS_HARDENED_CHILD_INDEX: u32 = 577; diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 7157e5a4f..d4f0cd891 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -5,24 +5,17 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; - -use crate::config::{ - BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, -}; -use crate::fee_estimator::{ - apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, - ConfirmationTarget, OnchainFeeEstimator, -}; -use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; -use crate::{Error, NodeMetrics}; +use std::collections::{HashMap, VecDeque}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use bitcoin::{BlockHash, FeeRate, Network, Transaction, Txid}; use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; use lightning::chain::Listen; use lightning::util::ser::Writeable; - use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::http::{HttpEndpoint, JsonResponse}; use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; @@ -30,20 +23,23 @@ use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; use lightning_block_sync::rest::RestClient; use lightning_block_sync::rpc::{RpcClient, RpcError}; use lightning_block_sync::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, + AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceErrorKind, Cache, + SpvClient, }; -use lightning_block_sync::{BlockSourceErrorKind, SpvClient}; - use serde::Serialize; -use base64::prelude::BASE64_STANDARD; -use base64::Engine; -use bitcoin::{BlockHash, FeeRate, Network, Transaction, Txid}; - -use std::collections::{HashMap, VecDeque}; -use std::sync::atomic::{AtomicU64, Ordering}; -use 
std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use crate::config::{ + BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, +}; +use crate::fee_estimator::{ + apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, + ConfirmationTarget, OnchainFeeEstimator, +}; +use crate::io::utils::write_node_metrics; +use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::{Error, NodeMetrics}; const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; @@ -1417,7 +1413,9 @@ mod tests { use bitcoin::hashes::Hash; use bitcoin::{FeeRate, OutPoint, ScriptBuf, Transaction, TxIn, TxOut, Txid, Witness}; use lightning_block_sync::http::JsonResponse; - use proptest::{arbitrary::any, collection::vec, prop_assert_eq, prop_compose, proptest}; + use proptest::arbitrary::any; + use proptest::collection::vec; + use proptest::{prop_assert_eq, prop_compose, proptest}; use serde_json::json; use crate::chain::bitcoind::{ diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 40d929ce7..dbd0d9f7f 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -5,8 +5,25 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use std::collections::HashMap; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use bdk_chain::bdk_core::spk_client::{ + FullScanRequest as BdkFullScanRequest, FullScanResponse as BdkFullScanResponse, + SyncRequest as BdkSyncRequest, SyncResponse as BdkSyncResponse, +}; +use bdk_electrum::BdkElectrumClient; +use bdk_wallet::{KeychainKind as BdkKeyChainKind, Update as BdkUpdate}; +use bitcoin::{FeeRate, Network, Script, ScriptBuf, Transaction, Txid}; +use electrum_client::{ + Batch, Client as ElectrumClient, ConfigBuilder as ElectrumConfigBuilder, ElectrumApi, +}; +use lightning::chain::{Confirm, Filter, WatchedOutput}; +use lightning::util::ser::Writeable; +use lightning_transaction_sync::ElectrumSyncClient; +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; use crate::config::{ Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, @@ -22,29 +39,6 @@ use crate::runtime::Runtime; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::NodeMetrics; -use lightning::chain::{Confirm, Filter, WatchedOutput}; -use lightning::util::ser::Writeable; -use lightning_transaction_sync::ElectrumSyncClient; - -use bdk_chain::bdk_core::spk_client::FullScanRequest as BdkFullScanRequest; -use bdk_chain::bdk_core::spk_client::FullScanResponse as BdkFullScanResponse; -use bdk_chain::bdk_core::spk_client::SyncRequest as BdkSyncRequest; -use bdk_chain::bdk_core::spk_client::SyncResponse as BdkSyncResponse; -use bdk_wallet::KeychainKind as BdkKeyChainKind; -use bdk_wallet::Update as BdkUpdate; - -use bdk_electrum::BdkElectrumClient; - -use electrum_client::Client as ElectrumClient; -use electrum_client::ConfigBuilder as ElectrumConfigBuilder; -use electrum_client::{Batch, ElectrumApi}; 
- -use bitcoin::{FeeRate, Network, Script, ScriptBuf, Transaction, Txid}; - -use std::collections::HashMap; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; - const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5; const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3; const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10; diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 2226358c1..be6f2fb86 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -5,8 +5,18 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use std::collections::HashMap; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use bdk_esplora::EsploraAsyncExt; +use bitcoin::{FeeRate, Network, Script, Transaction, Txid}; +use esplora_client::AsyncClient as EsploraAsyncClient; +use lightning::chain::{Confirm, Filter, WatchedOutput}; +use lightning::util::ser::Writeable; +use lightning_transaction_sync::EsploraSyncClient; +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; use crate::config::{ Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, @@ -21,21 +31,6 @@ use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; -use lightning::chain::{Confirm, Filter, WatchedOutput}; -use lightning::util::ser::Writeable; - -use lightning_transaction_sync::EsploraSyncClient; - -use bdk_esplora::EsploraAsyncExt; - -use esplora_client::AsyncClient as EsploraAsyncClient; - -use bitcoin::{FeeRate, Network, Script, Transaction, Txid}; - -use std::collections::HashMap; -use std::sync::{Arc, Mutex, RwLock}; -use 
std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; - pub(super) struct EsploraChainSource { pub(super) sync_config: EsploraSyncConfig, esplora_client: EsploraAsyncClient, diff --git a/src/chain/mod.rs b/src/chain/mod.rs index f3a29e984..309d60eab 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -9,6 +9,14 @@ mod bitcoind; mod electrum; mod esplora; +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; +use std::time::Duration; + +use bitcoin::{Script, Txid}; +use lightning::chain::Filter; +use lightning_block_sync::gossip::UtxoSource; + use crate::chain::bitcoind::BitcoindChainSource; use crate::chain::electrum::ElectrumChainSource; use crate::chain::esplora::EsploraChainSource; @@ -23,16 +31,6 @@ use crate::runtime::Runtime; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; -use lightning::chain::Filter; - -use lightning_block_sync::gossip::UtxoSource; - -use bitcoin::{Script, Txid}; - -use std::collections::HashMap; -use std::sync::{Arc, RwLock}; -use std::time::Duration; - pub(crate) enum WalletSyncStatus { Completed, InProgress { subscribers: tokio::sync::broadcast::Sender> }, diff --git a/src/config.rs b/src/config.rs index 88b70815d..d221dd6c3 100644 --- a/src/config.rs +++ b/src/config.rs @@ -7,20 +7,19 @@ //! Objects for configuring the node. 
-use crate::logger::LogLevel; +use std::fmt; +use std::time::Duration; +use bitcoin::secp256k1::PublicKey; +use bitcoin::Network; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; use lightning::routing::router::RouteParametersConfig; -use lightning::util::config::ChannelConfig as LdkChannelConfig; -use lightning::util::config::MaxDustHTLCExposure as LdkMaxDustHTLCExposure; -use lightning::util::config::UserConfig; +use lightning::util::config::{ + ChannelConfig as LdkChannelConfig, MaxDustHTLCExposure as LdkMaxDustHTLCExposure, UserConfig, +}; -use bitcoin::secp256k1::PublicKey; -use bitcoin::Network; - -use std::fmt; -use std::time::Duration; +use crate::logger::LogLevel; // Config defaults const DEFAULT_NETWORK: Network = Network::Bitcoin; @@ -551,11 +550,7 @@ pub enum AsyncPaymentsRole { mod tests { use std::str::FromStr; - use super::may_announce_channel; - use super::AnnounceError; - use super::Config; - use super::NodeAlias; - use super::SocketAddress; + use super::{may_announce_channel, AnnounceError, Config, NodeAlias, SocketAddress}; #[test] fn node_announce_channel() { diff --git a/src/connection.rs b/src/connection.rs index c4cde717a..e3a25f357 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -5,20 +5,19 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::logger::{log_error, log_info, LdkLogger}; -use crate::types::PeerManager; -use crate::Error; - -use lightning::ln::msgs::SocketAddress; - -use bitcoin::secp256k1::PublicKey; - use std::collections::hash_map::{self, HashMap}; use std::net::ToSocketAddrs; use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::time::Duration; +use bitcoin::secp256k1::PublicKey; +use lightning::ln::msgs::SocketAddress; + +use crate::logger::{log_error, log_info, LdkLogger}; +use crate::types::PeerManager; +use crate::Error; + pub(crate) struct ConnectionManager where L::Target: LdkLogger, diff --git a/src/data_store.rs b/src/data_store.rs index 45802c272..f9dbaa788 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -5,16 +5,15 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::logger::{log_error, LdkLogger}; -use crate::types::DynStore; -use crate::Error; +use std::collections::{hash_map, HashMap}; +use std::ops::Deref; +use std::sync::{Arc, Mutex}; use lightning::util::ser::{Readable, Writeable}; -use std::collections::hash_map; -use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, Mutex}; +use crate::logger::{log_error, LdkLogger}; +use crate::types::DynStore; +use crate::Error; pub(crate) trait StorableObject: Clone + Readable + Writeable { type Id: StorableObjectId; @@ -164,9 +163,8 @@ mod tests { use lightning::impl_writeable_tlv_based; use lightning::util::test_utils::{TestLogger, TestStore}; - use crate::hex_utils; - use super::*; + use crate::hex_utils; #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] struct TestObjectId { diff --git a/src/error.rs b/src/error.rs index eaa022e56..ae47c5ba8 100644 --- a/src/error.rs +++ b/src/error.rs @@ -5,14 +5,14 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+use std::fmt; + use bdk_chain::bitcoin::psbt::ExtractTxError as BdkExtractTxError; use bdk_chain::local_chain::CannotConnectError as BdkChainConnectionError; use bdk_chain::tx_graph::CalculateFeeError as BdkChainCalculateFeeError; use bdk_wallet::error::CreateTxError as BdkCreateTxError; use bdk_wallet::signer::SignerError as BdkSignerError; -use std::fmt; - #[derive(Copy, Clone, Debug, PartialEq, Eq)] /// An error that possibly needs to be handled by the user. pub enum Error { diff --git a/src/event.rs b/src/event.rs index 1d1acfafa..1236c7cf2 100644 --- a/src/event.rs +++ b/src/event.rs @@ -5,36 +5,19 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; -use crate::types::{CustomTlvRecord, DynStore, OnionMessenger, PaymentStore, Sweeper, Wallet}; -use crate::{ - hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, - UserChannelId, -}; - -use crate::config::{may_announce_channel, Config}; -use crate::connection::ConnectionManager; -use crate::data_store::DataStoreUpdateResult; -use crate::fee_estimator::ConfirmationTarget; -use crate::liquidity::LiquiditySource; -use crate::logger::Logger; - -use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; -use crate::payment::store::{ - PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, -}; - -use crate::io::{ - EVENT_QUEUE_PERSISTENCE_KEY, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, -}; -use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger}; - -use crate::runtime::Runtime; +use core::future::Future; +use core::task::{Poll, Waker}; +use std::collections::VecDeque; +use std::ops::Deref; +use std::sync::{Arc, Condvar, Mutex}; +use bitcoin::blockdata::locktime::absolute::LockTime; +use 
bitcoin::secp256k1::PublicKey; +use bitcoin::{Amount, OutPoint}; use lightning::events::bump_transaction::BumpTransactionEvent; -use lightning::events::{ClosureReason, PaymentPurpose, ReplayEvent}; -use lightning::events::{Event as LdkEvent, PaymentFailureReason}; +use lightning::events::{ + ClosureReason, Event as LdkEvent, PaymentFailureReason, PaymentPurpose, ReplayEvent, +}; use lightning::impl_writeable_tlv_based_enum; use lightning::ln::channelmanager::PaymentId; use lightning::ln::types::ChannelId; @@ -44,22 +27,31 @@ use lightning::util::config::{ }; use lightning::util::errors::APIError; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; - -use lightning_types::payment::{PaymentHash, PaymentPreimage}; - use lightning_liquidity::lsps2::utils::compute_opening_fee; - -use bitcoin::blockdata::locktime::absolute::LockTime; -use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, OutPoint}; - +use lightning_types::payment::{PaymentHash, PaymentPreimage}; use rand::{thread_rng, Rng}; -use core::future::Future; -use core::task::{Poll, Waker}; -use std::collections::VecDeque; -use std::ops::Deref; -use std::sync::{Arc, Condvar, Mutex}; +use crate::config::{may_announce_channel, Config}; +use crate::connection::ConnectionManager; +use crate::data_store::DataStoreUpdateResult; +use crate::fee_estimator::ConfirmationTarget; +use crate::io::{ + EVENT_QUEUE_PERSISTENCE_KEY, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, +}; +use crate::liquidity::LiquiditySource; +use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; +use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; +use crate::payment::store::{ + PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, +}; +use crate::runtime::Runtime; +use crate::types::{CustomTlvRecord, DynStore, OnionMessenger, 
PaymentStore, Sweeper, Wallet}; +use crate::{ + hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, + UserChannelId, +}; /// An event emitted by [`Node`], which should be handled by the user. /// @@ -1599,11 +1591,13 @@ where #[cfg(test)] mod tests { - use super::*; - use lightning::util::test_utils::{TestLogger, TestStore}; use std::sync::atomic::{AtomicU16, Ordering}; use std::time::Duration; + use lightning::util::test_utils::{TestLogger, TestStore}; + + use super::*; + #[tokio::test] async fn event_queue_persistence() { let store: Arc = Arc::new(TestStore::new(false)); diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index f8ddcd5fd..b787ecd33 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -5,15 +5,15 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::chaininterface::FeeEstimator as LdkFeeEstimator; -use lightning::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; - -use bitcoin::FeeRate; - use std::collections::HashMap; use std::sync::RwLock; +use bitcoin::FeeRate; +use lightning::chain::chaininterface::{ + ConfirmationTarget as LdkConfirmationTarget, FeeEstimator as LdkFeeEstimator, + FEERATE_FLOOR_SATS_PER_KW, +}; + #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub(crate) enum ConfirmationTarget { /// The default target for onchain payments. diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 02d321787..b64bd730e 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -10,63 +10,52 @@ // // Make sure to add any re-exported items that need to be used in uniffi below. 
-pub use crate::config::{ - default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, - EsploraSyncConfig, MaxDustHTLCExposure, -}; -pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; -pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; -pub use crate::logger::{LogLevel, LogRecord, LogWriter}; -pub use crate::payment::store::{ - ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, -}; -pub use crate::payment::QrPaymentResult; +use std::convert::TryInto; +use std::ops::Deref; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; +pub use bip39::Mnemonic; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::PublicKey; +pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, Txid}; pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; +use lightning::ln::channelmanager::PaymentId; pub use lightning::ln::types::ChannelId; +use lightning::offers::invoice::Bolt12Invoice as LdkBolt12Invoice; pub use lightning::offers::offer::OfferId; +use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; +use lightning::offers::refund::Refund as LdkRefund; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::routing::router::RouteParametersConfig; -pub use lightning_types::string::UntrustedString; - -pub use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; - +use lightning::util::ser::Writeable; +use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; pub use lightning_invoice::{Description, SignedRawBolt11Invoice}; - pub use lightning_liquidity::lsps0::ser::LSPSDateTime; pub use lightning_liquidity::lsps1::msgs::{ LSPS1ChannelInfo, LSPS1OrderId, LSPS1OrderParams, LSPS1PaymentState, }; - -pub use bitcoin::{Address, BlockHash, FeeRate, 
Network, OutPoint, Txid}; - -pub use bip39::Mnemonic; - +pub use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +pub use lightning_types::string::UntrustedString; pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; -use crate::UniffiCustomTypeConverter; - use crate::builder::sanitize_alias; +pub use crate::config::{ + default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, + EsploraSyncConfig, MaxDustHTLCExposure, +}; use crate::error::Error; -use crate::hex_utils; -use crate::{SocketAddress, UserChannelId}; - -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; -use bitcoin::secp256k1::PublicKey; -use lightning::ln::channelmanager::PaymentId; -use lightning::offers::invoice::Bolt12Invoice as LdkBolt12Invoice; -use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; -use lightning::offers::refund::Refund as LdkRefund; -use lightning::util::ser::Writeable; -use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; - -use std::convert::TryInto; -use std::ops::Deref; -use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; +pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; +pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; +pub use crate::logger::{LogLevel, LogRecord, LogWriter}; +pub use crate::payment::store::{ + ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, +}; +pub use crate::payment::QrPaymentResult; +use crate::{hex_utils, SocketAddress, UniffiCustomTypeConverter, UserChannelId}; impl UniffiCustomTypeConverter for PublicKey { type Builtin = String; @@ -1177,16 +1166,13 @@ impl UniffiCustomTypeConverter for LSPSDateTime { #[cfg(test)] mod tests { - use std::{ - num::NonZeroU64, - time::{SystemTime, UNIX_EPOCH}, - }; + use std::num::NonZeroU64; + use std::time::{SystemTime, UNIX_EPOCH}; + + use 
lightning::offers::offer::{OfferBuilder, Quantity}; + use lightning::offers::refund::RefundBuilder; use super::*; - use lightning::offers::{ - offer::{OfferBuilder, Quantity}, - refund::RefundBuilder, - }; fn create_test_bolt11_invoice() -> (LdkBolt11Invoice, Bolt11Invoice) { let invoice_string = "lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; diff --git a/src/gossip.rs b/src/gossip.rs index efaf3ce89..01aff4742 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -5,6 +5,14 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::future::Future; +use std::sync::atomic::{AtomicU32, Ordering}; +use std::sync::Arc; +use std::time::Duration; + +use lightning::util::native_async::FutureSpawner; +use lightning_block_sync::gossip::GossipVerifier; + use crate::chain::ChainSource; use crate::config::RGS_SYNC_TIMEOUT_SECS; use crate::logger::{log_trace, LdkLogger, Logger}; @@ -12,15 +20,6 @@ use crate::runtime::Runtime; use crate::types::{GossipSync, Graph, P2PGossipSync, PeerManager, RapidGossipSync, UtxoLookup}; use crate::Error; -use lightning_block_sync::gossip::GossipVerifier; - -use lightning::util::native_async::FutureSpawner; - -use std::future::Future; -use std::sync::atomic::{AtomicU32, Ordering}; -use std::sync::Arc; -use std::time::Duration; - pub(crate) enum GossipSource { P2PNetwork { gossip_sync: Arc, diff --git a/src/graph.rs b/src/graph.rs index 3e4e58c88..f2daebb9f 100644 --- a/src/graph.rs +++ b/src/graph.rs @@ -7,19 +7,17 @@ //! Objects for querying the network graph. 
-use crate::types::Graph; - -use lightning::routing::gossip::NodeId; +use std::sync::Arc; #[cfg(feature = "uniffi")] use lightning::ln::msgs::SocketAddress; +use lightning::routing::gossip::NodeId; #[cfg(feature = "uniffi")] use lightning::routing::gossip::RoutingFees; - #[cfg(not(feature = "uniffi"))] use lightning::routing::gossip::{ChannelInfo, NodeInfo}; -use std::sync::Arc; +use crate::types::Graph; /// Represents the network as nodes and channels between them. pub struct NetworkGraph { diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs index 15e60bcc2..abfbdf6ef 100644 --- a/src/io/sqlite_store/migrations.rs +++ b/src/io/sqlite_store/migrations.rs @@ -5,9 +5,8 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use rusqlite::Connection; - use lightning::io; +use rusqlite::Connection; pub(super) fn migrate_schema( connection: &mut Connection, kv_table_name: &str, from_version: u16, to_version: u16, @@ -75,14 +74,13 @@ pub(super) fn migrate_schema( #[cfg(test)] mod tests { - use crate::io::sqlite_store::SqliteStore; - use crate::io::test_utils::{do_read_write_remove_list_persist, random_storage_path}; + use std::fs; use lightning::util::persist::KVStoreSync; - use rusqlite::{named_params, Connection}; - use std::fs; + use crate::io::sqlite_store::SqliteStore; + use crate::io::test_utils::{do_read_write_remove_list_persist, random_storage_path}; #[test] fn rwrl_post_schema_1_migration() { diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 4006ab2cc..d18c7440d 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -6,18 +6,16 @@ // accordance with one or both of these licenses. //! Objects related to [`SqliteStore`] live here. 
-use crate::io::utils::check_namespace_key_validity; +use std::fs; +use std::path::PathBuf; +use std::sync::{Arc, Mutex}; use lightning::io; use lightning::util::persist::KVStoreSync; - use lightning_types::string::PrintableString; - use rusqlite::{named_params, Connection}; -use std::fs; -use std::path::PathBuf; -use std::sync::{Arc, Mutex}; +use crate::io::utils::check_namespace_key_validity; mod migrations; diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 244dd9cdc..067664851 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -5,22 +5,20 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::panic::RefUnwindSafe; +use std::path::PathBuf; + +use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, }; use lightning::util::persist::{read_channel_monitors, KVStoreSync, KVSTORE_NAMESPACE_KEY_MAX_LEN}; - -use lightning::events::ClosureReason; use lightning::util::test_utils; use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; - use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; -use std::panic::RefUnwindSafe; -use std::path::PathBuf; - pub(crate) fn random_storage_path() -> PathBuf { let mut temp_path = std::env::temp_dir(); let mut rng = thread_rng(); diff --git a/src/io/utils.rs b/src/io/utils.rs index 51e7be505..0cc910ad7 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -5,20 +5,20 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use super::*; -use crate::config::WALLET_KEYS_SEED_LEN; - -use crate::chain::ChainSource; -use crate::fee_estimator::OnchainFeeEstimator; -use crate::io::{ - NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, -}; -use crate::logger::{log_error, LdkLogger, Logger}; -use crate::peer_store::PeerStore; -use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; -use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; -use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; +use std::fs; +use std::io::Write; +use std::ops::Deref; +use std::path::Path; +use std::sync::Arc; +use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; +use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; +use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; +use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; +use bdk_chain::ConfirmationBlockTime; +use bdk_wallet::ChangeSet as BdkWalletChangeSet; +use bip39::Mnemonic; +use bitcoin::Network; use lightning::io::Cursor; use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; @@ -32,25 +32,21 @@ use lightning::util::persist::{ }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; use lightning::util::sweep::OutputSweeper; - use lightning_types::string::PrintableString; - -use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; -use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; -use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; -use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; -use bdk_chain::ConfirmationBlockTime; -use bdk_wallet::ChangeSet as BdkWalletChangeSet; - -use bip39::Mnemonic; -use bitcoin::Network; use rand::{thread_rng, RngCore}; -use std::fs; -use std::io::Write; -use std::ops::Deref; -use std::path::Path; -use std::sync::Arc; +use super::*; +use crate::chain::ChainSource; +use crate::config::WALLET_KEYS_SEED_LEN; +use 
crate::fee_estimator::OnchainFeeEstimator; +use crate::io::{ + NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, +}; +use crate::logger::{log_error, LdkLogger, Logger}; +use crate::peer_store::PeerStore; +use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; +use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; +use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; /// Generates a random [BIP 39] mnemonic. /// diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 87f966a9b..a03aafc44 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -5,18 +5,16 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::io::utils::check_namespace_key_validity; -use crate::runtime::Runtime; +#[cfg(test)] +use std::panic::RefUnwindSafe; +use std::sync::Arc; +use std::time::Duration; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::KVStoreSync; use prost::Message; use rand::RngCore; -#[cfg(test)] -use std::panic::RefUnwindSafe; -use std::sync::Arc; -use std::time::Duration; use vss_client::client::VssClient; use vss_client::error::VssError; use vss_client::headers::VssHeaderProvider; @@ -31,6 +29,9 @@ use vss_client::util::retry::{ }; use vss_client::util::storable_builder::{EntropySource, StorableBuilder}; +use crate::io::utils::check_namespace_key_validity; +use crate::runtime::Runtime; + type CustomRetryPolicy = FilteredRetryPolicy< JitteredRetryPolicy< MaxTotalDelayRetryPolicy>>, @@ -256,14 +257,16 @@ impl RefUnwindSafe for VssStore {} #[cfg(test)] #[cfg(vss_test)] mod tests { - use super::*; - use crate::io::test_utils::do_read_write_remove_list_persist; + use std::collections::HashMap; + use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng, RngCore}; - use std::collections::HashMap; use 
tokio::runtime; use vss_client::headers::FixedHeaders; + use super::*; + use crate::io::test_utils::do_read_write_remove_list_persist; + #[test] fn vss_read_write_remove_list_persist() { let runtime = Arc::new(Runtime::new().unwrap()); diff --git a/src/lib.rs b/src/lib.rs index 046343231..0f547ce1d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -99,43 +99,45 @@ mod tx_broadcaster; mod types; mod wallet; -pub use bip39; -pub use bitcoin; -pub use lightning; -pub use lightning_invoice; -pub use lightning_liquidity; -pub use lightning_types; -pub use tokio; -pub use vss_client; +use std::default::Default; +use std::net::ToSocketAddrs; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; -pub use error::Error as NodeError; -use error::Error; - -pub use event::Event; - -pub use io::utils::generate_entropy_mnemonic; - -#[cfg(feature = "uniffi")] -use ffi::*; - +use bitcoin::secp256k1::PublicKey; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; #[cfg(not(feature = "uniffi"))] pub use builder::NodeBuilder as Builder; - use chain::ChainSource; use config::{ default_user_config, may_announce_channel, AsyncPaymentsRole, ChannelConfig, Config, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; +pub use error::Error as NodeError; +use error::Error; +pub use event::Event; use event::{EventHandler, EventQueue}; +#[cfg(feature = "uniffi")] +use ffi::*; use gossip::GossipSource; use graph::NetworkGraph; +pub use io::utils::generate_entropy_mnemonic; use io::utils::write_node_metrics; +use lightning::chain::BestBlock; +use lightning::events::bump_transaction::Wallet as LdkWallet; +use lightning::impl_writeable_tlv_based; +use lightning::ln::channel_state::ChannelShutdownState; +use 
lightning::ln::channelmanager::PaymentId; +use lightning::ln::msgs::SocketAddress; +use lightning::routing::gossip::NodeAlias; +use lightning_background_processor::process_events_async_with_kv_store_sync; use liquidity::{LSPS1Liquidity, LiquiditySource}; +use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use payment::asynchronous::om_mailbox::OnionMessageMailbox; use payment::asynchronous::static_invoice_store::StaticInvoiceStore; use payment::{ @@ -143,34 +145,17 @@ use payment::{ UnifiedQrPayment, }; use peer_store::{PeerInfo, PeerStore}; +use rand::Rng; use runtime::Runtime; use types::{ Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, }; pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, UserChannelId}; - -use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; - -use lightning::chain::BestBlock; -use lightning::events::bump_transaction::Wallet as LdkWallet; -use lightning::impl_writeable_tlv_based; -use lightning::ln::channel_state::ChannelShutdownState; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::msgs::SocketAddress; -use lightning::routing::gossip::NodeAlias; - -use lightning_background_processor::process_events_async_with_kv_store_sync; - -use bitcoin::secp256k1::PublicKey; - -use rand::Rng; - -use std::default::Default; -use std::net::ToSocketAddrs; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +pub use { + bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, + vss_client, +}; #[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node"); diff --git a/src/liquidity.rs b/src/liquidity.rs index 5d0bf5afe..ae31f9ace 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -7,21 +7,20 @@ //! 
Objects related to liquidity management. -use crate::chain::ChainSource; -use crate::connection::ConnectionManager; -use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; -use crate::runtime::Runtime; -use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager, Wallet}; -use crate::{total_anchor_channels_reserve_sats, Config, Error}; +use std::collections::HashMap; +use std::ops::Deref; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::Duration; +use bitcoin::hashes::{sha256, Hash}; +use bitcoin::secp256k1::{PublicKey, Secp256k1}; +use chrono::Utc; use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{InterceptId, MIN_FINAL_CLTV_EXPIRY_DELTA}; use lightning::ln::msgs::SocketAddress; use lightning::ln::types::ChannelId; use lightning::routing::router::{RouteHint, RouteHintHop}; - use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, InvoiceBuilder, RoutingFees}; - use lightning_liquidity::events::LiquidityEvent; use lightning_liquidity::lsps0::ser::{LSPSDateTime, LSPSRequestId}; use lightning_liquidity::lsps1::client::LSPS1ClientConfig as LdkLSPS1ClientConfig; @@ -35,22 +34,16 @@ use lightning_liquidity::lsps2::msgs::{LSPS2OpeningFeeParams, LSPS2RawOpeningFee use lightning_liquidity::lsps2::service::LSPS2ServiceConfig as LdkLSPS2ServiceConfig; use lightning_liquidity::lsps2::utils::compute_opening_fee; use lightning_liquidity::{LiquidityClientConfig, LiquidityServiceConfig}; - use lightning_types::payment::PaymentHash; - -use bitcoin::hashes::{sha256, Hash}; -use bitcoin::secp256k1::{PublicKey, Secp256k1}; - -use tokio::sync::oneshot; - -use chrono::Utc; - use rand::Rng; +use tokio::sync::oneshot; -use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::Duration; +use crate::chain::ChainSource; +use crate::connection::ConnectionManager; +use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; +use 
crate::runtime::Runtime; +use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager, Wallet}; +use crate::{total_anchor_channels_reserve_sats, Config, Error}; const LIQUIDITY_REQUEST_TIMEOUT_SECS: u64 = 5; diff --git a/src/logger.rs b/src/logger.rs index 40817897c..4eaefad74 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -7,15 +7,6 @@ //! Logging-related objects. -pub(crate) use lightning::util::logger::{Logger as LdkLogger, Record as LdkRecord}; -pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; - -pub use lightning::util::logger::Level as LogLevel; - -use chrono::Utc; -use log::Level as LogFacadeLevel; -use log::Record as LogFacadeRecord; - #[cfg(not(feature = "uniffi"))] use core::fmt; use std::fs; @@ -23,6 +14,12 @@ use std::io::Write; use std::path::Path; use std::sync::Arc; +use chrono::Utc; +pub use lightning::util::logger::Level as LogLevel; +pub(crate) use lightning::util::logger::{Logger as LdkLogger, Record as LdkRecord}; +pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; +use log::{Level as LogFacadeLevel, Record as LogFacadeRecord}; + /// A unit of logging output with metadata to enable filtering `module_path`, /// `file`, and `line` to inform on log's source. #[cfg(not(feature = "uniffi"))] diff --git a/src/message_handler.rs b/src/message_handler.rs index 25995a481..fc206ec4d 100644 --- a/src/message_handler.rs +++ b/src/message_handler.rs @@ -5,21 +5,18 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::liquidity::LiquiditySource; +use std::ops::Deref; +use std::sync::Arc; +use bitcoin::secp256k1::PublicKey; use lightning::ln::peer_handler::CustomMessageHandler; use lightning::ln::wire::CustomMessageReader; use lightning::util::logger::Logger; use lightning::util::ser::LengthLimitedRead; - -use lightning_types::features::{InitFeatures, NodeFeatures}; - use lightning_liquidity::lsps0::ser::RawLSPSMessage; +use lightning_types::features::{InitFeatures, NodeFeatures}; -use bitcoin::secp256k1::PublicKey; - -use std::ops::Deref; -use std::sync::Arc; +use crate::liquidity::LiquiditySource; pub(crate) enum NodeCustomMessageHandler where diff --git a/src/payment/asynchronous/rate_limiter.rs b/src/payment/asynchronous/rate_limiter.rs index 153577b16..671b1dc72 100644 --- a/src/payment/asynchronous/rate_limiter.rs +++ b/src/payment/asynchronous/rate_limiter.rs @@ -72,10 +72,10 @@ impl RateLimiter { #[cfg(test)] mod tests { - use crate::payment::asynchronous::rate_limiter::RateLimiter; - use std::time::Duration; + use crate::payment::asynchronous::rate_limiter::RateLimiter; + #[test] fn rate_limiter_test() { // Test diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index f1aa702a4..e81fd8216 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -7,20 +7,20 @@ //! Store implementation for [`StaticInvoice`]s. 
-use crate::hex_utils; -use crate::io::STATIC_INVOICE_STORE_PRIMARY_NAMESPACE; -use crate::payment::asynchronous::rate_limiter::RateLimiter; -use crate::types::DynStore; +use std::sync::{Arc, Mutex}; +use std::time::Duration; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; - use lightning::blinded_path::message::BlindedMessagePath; use lightning::impl_writeable_tlv_based; -use lightning::{offers::static_invoice::StaticInvoice, util::ser::Readable, util::ser::Writeable}; +use lightning::offers::static_invoice::StaticInvoice; +use lightning::util::ser::{Readable, Writeable}; -use std::sync::{Arc, Mutex}; -use std::time::Duration; +use crate::hex_utils; +use crate::io::STATIC_INVOICE_STORE_PRIMARY_NAMESPACE; +use crate::payment::asynchronous::rate_limiter::RateLimiter; +use crate::types::DynStore; struct PersistedStaticInvoice { invoice: StaticInvoice, @@ -133,23 +133,18 @@ impl StaticInvoiceStore { #[cfg(test)] mod tests { - use std::{sync::Arc, time::Duration}; - - use bitcoin::{ - key::{Keypair, Secp256k1}, - secp256k1::{PublicKey, SecretKey}, - }; - use lightning::blinded_path::{ - message::BlindedMessagePath, - payment::{BlindedPayInfo, BlindedPaymentPath}, - BlindedHop, - }; + use std::sync::Arc; + use std::time::Duration; + + use bitcoin::key::{Keypair, Secp256k1}; + use bitcoin::secp256k1::{PublicKey, SecretKey}; + use lightning::blinded_path::message::BlindedMessagePath; + use lightning::blinded_path::payment::{BlindedPayInfo, BlindedPaymentPath}; + use lightning::blinded_path::BlindedHop; use lightning::ln::inbound_payment::ExpandedKey; - use lightning::offers::{ - nonce::Nonce, - offer::OfferBuilder, - static_invoice::{StaticInvoice, StaticInvoiceBuilder}, - }; + use lightning::offers::nonce::Nonce; + use lightning::offers::offer::OfferBuilder; + use lightning::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder}; use lightning::sign::EntropySource; use lightning::util::test_utils::TestStore; use 
lightning_types::features::BlindedHopFeatures; diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 7dcb2817c..60c313381 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -9,6 +9,19 @@ //! //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md +use std::sync::{Arc, RwLock}; + +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; +use lightning::ln::channelmanager::{ + Bolt11InvoiceParameters, Bolt11PaymentError, PaymentId, Retry, RetryableSendFailure, +}; +use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; +use lightning_invoice::{ + Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescription as LdkBolt11InvoiceDescription, +}; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; + use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::connection::ConnectionManager; use crate::data_store::DataStoreUpdateResult; @@ -24,21 +37,6 @@ use crate::peer_store::{PeerInfo, PeerStore}; use crate::runtime::Runtime; use crate::types::{ChannelManager, PaymentStore}; -use lightning::ln::channelmanager::{ - Bolt11InvoiceParameters, Bolt11PaymentError, PaymentId, Retry, RetryableSendFailure, -}; -use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; - -use lightning_types::payment::{PaymentHash, PaymentPreimage}; - -use lightning_invoice::Bolt11Invoice as LdkBolt11Invoice; -use lightning_invoice::Bolt11InvoiceDescription as LdkBolt11InvoiceDescription; - -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; - -use std::sync::{Arc, RwLock}; - #[cfg(not(feature = "uniffi"))] type Bolt11Invoice = LdkBolt11Invoice; #[cfg(feature = "uniffi")] diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 6cb2f0b85..337eedf96 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -9,28 +9,26 @@ //! //! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -use crate::config::{AsyncPaymentsRole, LDK_PAYMENT_RETRY_TIMEOUT}; -use crate::error::Error; -use crate::ffi::{maybe_deref, maybe_wrap}; -use crate::logger::{log_error, log_info, LdkLogger, Logger}; -use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; -use crate::types::{ChannelManager, PaymentStore}; +use std::num::NonZeroU64; +use std::sync::{Arc, RwLock}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use lightning::blinded_path::message::BlindedMessagePath; use lightning::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, Retry}; use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::routing::router::RouteParametersConfig; - #[cfg(feature = "uniffi")] use lightning::util::ser::{Readable, Writeable}; use lightning_types::string::UntrustedString; - use rand::RngCore; -use std::num::NonZeroU64; -use std::sync::{Arc, RwLock}; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use crate::config::{AsyncPaymentsRole, LDK_PAYMENT_RETRY_TIMEOUT}; +use crate::error::Error; +use crate::ffi::{maybe_deref, maybe_wrap}; +use crate::logger::{log_error, log_info, LdkLogger, Logger}; +use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; +use crate::types::{ChannelManager, PaymentStore}; #[cfg(not(feature = "uniffi"))] type Bolt12Invoice = lightning::offers::invoice::Bolt12Invoice; diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index 2614e55ce..c5100d772 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -7,16 +7,16 @@ //! Holds a payment handler allowing to send and receive on-chain payments. 
+use std::sync::{Arc, RwLock}; + +use bitcoin::{Address, Txid}; + use crate::config::Config; use crate::error::Error; use crate::logger::{log_info, LdkLogger, Logger}; use crate::types::{ChannelManager, Wallet}; use crate::wallet::OnchainSendAmount; -use bitcoin::{Address, Txid}; - -use std::sync::{Arc, RwLock}; - #[cfg(not(feature = "uniffi"))] type FeeRate = bitcoin::FeeRate; #[cfg(feature = "uniffi")] diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 181307a0f..6c074f308 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -7,21 +7,19 @@ //! Holds a payment handler allowing to send spontaneous ("keysend") payments. -use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; -use crate::error::Error; -use crate::logger::{log_error, log_info, LdkLogger, Logger}; -use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; -use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore}; +use std::sync::{Arc, RwLock}; +use bitcoin::secp256k1::PublicKey; use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use lightning::sign::EntropySource; - use lightning_types::payment::{PaymentHash, PaymentPreimage}; -use bitcoin::secp256k1::PublicKey; - -use std::sync::{Arc, RwLock}; +use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; +use crate::error::Error; +use crate::logger::{log_error, log_info, LdkLogger, Logger}; +use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; +use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore}; // The default `final_cltv_expiry_delta` we apply when not set. 
const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; diff --git a/src/payment/store.rs b/src/payment/store.rs index 568394b48..b17898d9c 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -5,6 +5,9 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use bitcoin::{BlockHash, Txid}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::DecodeError; use lightning::offers::offer::OfferId; @@ -13,14 +16,9 @@ use lightning::{ _init_and_read_len_prefixed_tlv_fields, impl_writeable_tlv_based, impl_writeable_tlv_based_enum, write_tlv_fields, }; - use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning_types::string::UntrustedString; -use bitcoin::{BlockHash, Txid}; - -use std::time::{Duration, SystemTime, UNIX_EPOCH}; - use crate::data_store::{StorableObject, StorableObjectId, StorableObjectUpdate}; use crate::hex_utils; @@ -607,10 +605,11 @@ impl StorableObjectUpdate for PaymentDetailsUpdate { #[cfg(test)] mod tests { - use super::*; use bitcoin::io::Cursor; use lightning::util::ser::Readable; + use super::*; + /// We refactored `PaymentDetails` to hold a payment id and moved some required fields into /// `PaymentKind`. Here, we keep the old layout available in order test de/ser compatibility. #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index af5ee1c7b..fc2eca150 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -11,23 +11,22 @@ //! [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md //! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -use crate::error::Error; -use crate::ffi::maybe_wrap; -use crate::logger::{log_error, LdkLogger, Logger}; -use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; -use crate::Config; - -use lightning::ln::channelmanager::PaymentId; -use lightning::offers::offer::Offer; -use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; +use std::sync::Arc; +use std::vec::IntoIter; use bip21::de::ParamKind; use bip21::{DeserializationError, DeserializeParams, Param, SerializeParams}; use bitcoin::address::{NetworkChecked, NetworkUnchecked}; use bitcoin::{Amount, Txid}; +use lightning::ln::channelmanager::PaymentId; +use lightning::offers::offer::Offer; +use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; -use std::sync::Arc; -use std::vec::IntoIter; +use crate::error::Error; +use crate::ffi::maybe_wrap; +use crate::logger::{log_error, LdkLogger, Logger}; +use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; +use crate::Config; type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; @@ -303,10 +302,12 @@ impl DeserializationError for Extras { #[cfg(test)] mod tests { + use std::str::FromStr; + + use bitcoin::{Address, Network}; + use super::*; use crate::payment::unified_qr::Extras; - use bitcoin::{Address, Network}; - use std::str::FromStr; #[test] fn parse_uri() { diff --git a/src/peer_store.rs b/src/peer_store.rs index cf3755d23..5ebdc0419 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -5,6 +5,14 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+use std::collections::HashMap; +use std::ops::Deref; +use std::sync::{Arc, RwLock}; + +use bitcoin::secp256k1::PublicKey; +use lightning::impl_writeable_tlv_based; +use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; + use crate::io::{ PEER_INFO_PERSISTENCE_KEY, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, @@ -13,15 +21,6 @@ use crate::logger::{log_error, LdkLogger}; use crate::types::DynStore; use crate::{Error, SocketAddress}; -use lightning::impl_writeable_tlv_based; -use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; - -use bitcoin::secp256k1::PublicKey; - -use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, RwLock}; - pub struct PeerStore where L::Target: LdkLogger, @@ -149,12 +148,13 @@ impl_writeable_tlv_based!(PeerInfo, { #[cfg(test)] mod tests { - use super::*; - use lightning::util::test_utils::{TestLogger, TestStore}; - use std::str::FromStr; use std::sync::Arc; + use lightning::util::test_utils::{TestLogger, TestStore}; + + use super::*; + #[test] fn peer_info_persistence() { let store: Arc = Arc::new(TestStore::new(false)); diff --git a/src/runtime.rs b/src/runtime.rs index b30790a04..2275d5bea 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -5,17 +5,17 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+use std::future::Future; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use tokio::task::{JoinHandle, JoinSet}; + use crate::config::{ BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, }; use crate::logger::{log_debug, log_error, log_trace, LdkLogger, Logger}; -use tokio::task::{JoinHandle, JoinSet}; - -use std::future::Future; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - pub(crate) struct Runtime { mode: RuntimeMode, background_tasks: Mutex>, diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs index 4d9397a61..12a1fe650 100644 --- a/src/tx_broadcaster.rs +++ b/src/tx_broadcaster.rs @@ -5,16 +5,13 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::logger::{log_error, LdkLogger}; - -use lightning::chain::chaininterface::BroadcasterInterface; +use std::ops::Deref; use bitcoin::Transaction; +use lightning::chain::chaininterface::BroadcasterInterface; +use tokio::sync::{mpsc, Mutex, MutexGuard}; -use tokio::sync::mpsc; -use tokio::sync::{Mutex, MutexGuard}; - -use std::ops::Deref; +use crate::logger::{log_error, LdkLogger}; const BCAST_PACKAGE_QUEUE_SIZE: usize = 50; diff --git a/src/types.rs b/src/types.rs index 3635badff..f152772a1 100644 --- a/src/types.rs +++ b/src/types.rs @@ -5,41 +5,35 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::chain::ChainSource; -use crate::config::ChannelConfig; -use crate::data_store::DataStore; -use crate::fee_estimator::OnchainFeeEstimator; -use crate::gossip::RuntimeSpawner; -use crate::logger::Logger; -use crate::message_handler::NodeCustomMessageHandler; -use crate::payment::PaymentDetails; +use std::sync::{Arc, Mutex}; +use bitcoin::secp256k1::PublicKey; +use bitcoin::OutPoint; use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; -use lightning::ln::msgs::RoutingMessageHandler; -use lightning::ln::msgs::SocketAddress; +use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::ln::types::ChannelId; use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::KVStoreSync; -use lightning::util::persist::KVStoreSyncWrapper; +use lightning::util::persist::{KVStoreSync, KVStoreSyncWrapper}; use lightning::util::ser::{Readable, Writeable, Writer}; - use lightning::util::sweep::OutputSweeper; use lightning_block_sync::gossip::{GossipVerifier, UtxoSource}; - -use lightning_net_tokio::SocketDescriptor; - use lightning_liquidity::utils::time::DefaultTimeProvider; +use lightning_net_tokio::SocketDescriptor; -use bitcoin::secp256k1::PublicKey; -use bitcoin::OutPoint; - -use std::sync::{Arc, Mutex}; +use crate::chain::ChainSource; +use crate::config::ChannelConfig; +use crate::data_store::DataStore; +use crate::fee_estimator::OnchainFeeEstimator; +use crate::gossip::RuntimeSpawner; +use crate::logger::Logger; +use crate::message_handler::NodeCustomMessageHandler; +use crate::payment::PaymentDetails; pub(crate) type DynStore = dyn KVStoreSync + Sync + Send; diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 
c03353ef8..0ce4628d4 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -5,37 +5,13 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use persist::KVStoreWalletPersister; - -use crate::config::Config; -use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; - -use crate::fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; -use crate::payment::store::ConfirmationStatus; -use crate::payment::{PaymentDetails, PaymentDirection, PaymentStatus}; -use crate::types::{Broadcaster, PaymentStore}; -use crate::Error; - -use lightning::chain::chaininterface::BroadcasterInterface; -use lightning::chain::channelmonitor::ANTI_REORG_DELAY; -use lightning::chain::{BestBlock, Listen}; - -use lightning::events::bump_transaction::{Utxo, WalletSource}; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::inbound_payment::ExpandedKey; -use lightning::ln::msgs::UnsignedGossipMessage; -use lightning::ln::script::ShutdownScript; -use lightning::sign::{ - ChangeDestinationSource, EntropySource, InMemorySigner, KeysManager, NodeSigner, OutputSpender, - PeerStorageKey, Recipient, SignerProvider, SpendableOutputDescriptor, -}; - -use lightning::util::message_signing; -use lightning_invoice::RawBolt11Invoice; +use std::future::Future; +use std::pin::Pin; +use std::str::FromStr; +use std::sync::{Arc, Mutex}; use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; use bdk_wallet::{Balance, KeychainKind, PersistedWallet, SignOptions, Update}; - use bitcoin::address::NetworkUnchecked; use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; use bitcoin::blockdata::locktime::absolute::LockTime; @@ -49,11 +25,29 @@ use bitcoin::{ Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, WitnessProgram, WitnessVersion, }; +use lightning::chain::chaininterface::BroadcasterInterface; +use 
lightning::chain::channelmonitor::ANTI_REORG_DELAY; +use lightning::chain::{BestBlock, Listen}; +use lightning::events::bump_transaction::{Utxo, WalletSource}; +use lightning::ln::channelmanager::PaymentId; +use lightning::ln::inbound_payment::ExpandedKey; +use lightning::ln::msgs::UnsignedGossipMessage; +use lightning::ln::script::ShutdownScript; +use lightning::sign::{ + ChangeDestinationSource, EntropySource, InMemorySigner, KeysManager, NodeSigner, OutputSpender, + PeerStorageKey, Recipient, SignerProvider, SpendableOutputDescriptor, +}; +use lightning::util::message_signing; +use lightning_invoice::RawBolt11Invoice; +use persist::KVStoreWalletPersister; -use std::future::Future; -use std::pin::Pin; -use std::str::FromStr; -use std::sync::{Arc, Mutex}; +use crate::config::Config; +use crate::fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; +use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::payment::store::ConfirmationStatus; +use crate::payment::{PaymentDetails, PaymentDirection, PaymentStatus}; +use crate::types::{Broadcaster, PaymentStore}; +use crate::Error; pub(crate) enum OnchainSendAmount { ExactRetainingReserve { amount_sats: u64, cur_anchor_reserve_sats: u64 }, diff --git a/src/wallet/persist.rs b/src/wallet/persist.rs index d9e4e7135..5c8668937 100644 --- a/src/wallet/persist.rs +++ b/src/wallet/persist.rs @@ -5,6 +5,11 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+use std::sync::Arc; + +use bdk_chain::Merge; +use bdk_wallet::{ChangeSet, WalletPersister}; + use crate::io::utils::{ read_bdk_wallet_change_set, write_bdk_wallet_change_descriptor, write_bdk_wallet_descriptor, write_bdk_wallet_indexer, write_bdk_wallet_local_chain, write_bdk_wallet_network, @@ -12,11 +17,6 @@ use crate::io::utils::{ }; use crate::logger::{log_error, LdkLogger, Logger}; use crate::types::DynStore; - -use bdk_chain::Merge; -use bdk_wallet::{ChangeSet, WalletPersister}; - -use std::sync::Arc; pub(crate) struct KVStoreWalletPersister { latest_change_set: Option, kv_store: Arc, diff --git a/src/wallet/ser.rs b/src/wallet/ser.rs index ae1509bdf..c1ad984e6 100644 --- a/src/wallet/ser.rs +++ b/src/wallet/ser.rs @@ -5,26 +5,23 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use lightning::ln::msgs::DecodeError; -use lightning::util::ser::{BigSize, Readable, RequiredWrapper, Writeable, Writer}; -use lightning::{decode_tlv_stream, encode_tlv_stream, read_tlv_fields, write_tlv_fields}; +use std::collections::{BTreeMap, BTreeSet}; +use std::str::FromStr; +use std::sync::Arc; use bdk_chain::bdk_core::{BlockId, ConfirmationBlockTime}; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; use bdk_chain::DescriptorId; - use bdk_wallet::descriptor::Descriptor; use bdk_wallet::keys::DescriptorPublicKey; - use bitcoin::hashes::sha256::Hash as Sha256Hash; use bitcoin::p2p::Magic; use bitcoin::{BlockHash, Network, OutPoint, Transaction, TxOut, Txid}; - -use std::collections::{BTreeMap, BTreeSet}; -use std::str::FromStr; -use std::sync::Arc; +use lightning::ln::msgs::DecodeError; +use lightning::util::ser::{BigSize, Readable, RequiredWrapper, Writeable, Writer}; +use lightning::{decode_tlv_stream, encode_tlv_stream, 
read_tlv_fields, write_tlv_fields}; const CHANGESET_SERIALIZATION_VERSION: u8 = 1; diff --git a/tests/common/logging.rs b/tests/common/logging.rs index d7d59ba32..3ff24d34d 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -1,10 +1,10 @@ +use std::sync::{Arc, Mutex}; + use chrono::Utc; -use ldk_node::logger::LogRecord; -use ldk_node::logger::{LogLevel, LogWriter}; +use ldk_node::logger::{LogLevel, LogRecord, LogWriter}; #[cfg(not(feature = "uniffi"))] use log::Record as LogFacadeRecord; use log::{Level as LogFacadeLevel, LevelFilter as LogFacadeLevelFilter, Log as LogFacadeLog}; -use std::sync::{Arc, Mutex}; #[derive(Clone)] pub(crate) enum TestLogWriter { diff --git a/tests/common/mod.rs b/tests/common/mod.rs index aa09b86d0..98c96e307 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -10,46 +10,39 @@ pub(crate) mod logging; -use logging::TestLogWriter; +use std::collections::{HashMap, HashSet}; +use std::env; +use std::path::PathBuf; +use std::sync::{Arc, RwLock}; +use std::time::Duration; +use bitcoin::hashes::hex::FromHex; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; +use bitcoin::{ + Address, Amount, Network, OutPoint, ScriptBuf, Sequence, Transaction, Txid, Witness, +}; +use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD}; +use electrsd::{corepc_node, ElectrsD}; +use electrum_client::ElectrumApi; use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance, }; - use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; use lightning::util::persist::KVStoreSync; use lightning::util::test_utils::TestStore; - use lightning_invoice::{Bolt11InvoiceDescription, Description}; -use 
lightning_types::payment::{PaymentHash, PaymentPreimage}; - use lightning_persister::fs_store::FilesystemStore; - -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::{hex::FromHex, Hash}; -use bitcoin::{ - Address, Amount, Network, OutPoint, ScriptBuf, Sequence, Transaction, Txid, Witness, -}; - -use electrsd::corepc_node::Client as BitcoindClient; -use electrsd::corepc_node::Node as BitcoinD; -use electrsd::{corepc_node, ElectrsD}; -use electrum_client::ElectrumApi; - +use lightning_types::payment::{PaymentHash, PaymentPreimage}; +use logging::TestLogWriter; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde_json::{json, Value}; -use std::collections::{HashMap, HashSet}; -use std::env; -use std::path::PathBuf; -use std::sync::{Arc, RwLock}; -use std::time::Duration; - macro_rules! expect_event { ($node: expr, $event_type: ident) => {{ match $node.wait_next_event() { diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index f77311fb2..6fc72b2c2 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -9,27 +9,22 @@ mod common; -use ldk_node::bitcoin::secp256k1::PublicKey; -use ldk_node::bitcoin::Amount; -use ldk_node::lightning::ln::msgs::SocketAddress; -use ldk_node::{Builder, Event}; -use lightning_invoice::{Bolt11InvoiceDescription, Description}; +use std::default::Default; +use std::str::FromStr; use clightningrpc::lightningrpc::LightningRPC; use clightningrpc::responses::NetworkAddress; - use electrsd::corepc_client::client_sync::Auth; use electrsd::corepc_node::Client as BitcoindClient; - use electrum_client::Client as ElectrumClient; -use lightning_invoice::Bolt11Invoice; - +use ldk_node::bitcoin::secp256k1::PublicKey; +use ldk_node::bitcoin::Amount; +use ldk_node::lightning::ln::msgs::SocketAddress; +use ldk_node::{Builder, Event}; +use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; use rand::distributions::Alphanumeric; use 
rand::{thread_rng, Rng}; -use std::default::Default; -use std::str::FromStr; - #[test] fn test_cln() { // Setup bitcoind / electrs clients diff --git a/tests/integration_tests_lnd.rs b/tests/integration_tests_lnd.rs index 0232e8f2e..7dfc1e4f9 100755 --- a/tests/integration_tests_lnd.rs +++ b/tests/integration_tests_lnd.rs @@ -2,29 +2,25 @@ mod common; +use std::default::Default; +use std::str::FromStr; + +use bitcoin::hex::DisplayHex; +use electrsd::corepc_client::client_sync::Auth; +use electrsd::corepc_node::Client as BitcoindClient; +use electrum_client::Client as ElectrumClient; use ldk_node::bitcoin::secp256k1::PublicKey; use ldk_node::bitcoin::Amount; use ldk_node::lightning::ln::msgs::SocketAddress; use ldk_node::{Builder, Event}; - +use lightning_invoice::{Bolt11InvoiceDescription, Description}; +use lnd_grpc_rust::lnrpc::invoice::InvoiceState::Settled as LndInvoiceStateSettled; use lnd_grpc_rust::lnrpc::{ - invoice::InvoiceState::Settled as LndInvoiceStateSettled, GetInfoRequest as LndGetInfoRequest, - GetInfoResponse as LndGetInfoResponse, Invoice as LndInvoice, - ListInvoiceRequest as LndListInvoiceRequest, QueryRoutesRequest as LndQueryRoutesRequest, - Route as LndRoute, SendRequest as LndSendRequest, + GetInfoRequest as LndGetInfoRequest, GetInfoResponse as LndGetInfoResponse, + Invoice as LndInvoice, ListInvoiceRequest as LndListInvoiceRequest, + QueryRoutesRequest as LndQueryRoutesRequest, Route as LndRoute, SendRequest as LndSendRequest, }; use lnd_grpc_rust::{connect, LndClient}; - -use electrsd::corepc_client::client_sync::Auth; -use electrsd::corepc_node::Client as BitcoindClient; - -use electrum_client::Client as ElectrumClient; -use lightning_invoice::{Bolt11InvoiceDescription, Description}; - -use bitcoin::hex::DisplayHex; - -use std::default::Default; -use std::str::FromStr; use tokio::fs; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 
63fc737b3..0db30ea1c 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -7,19 +7,24 @@ mod common; +use std::collections::HashSet; +use std::str::FromStr; +use std::sync::Arc; + +use bitcoin::address::NetworkUnchecked; +use bitcoin::hashes::sha256::Hash as Sha256Hash; +use bitcoin::hashes::Hash; +use bitcoin::{Address, Amount, ScriptBuf}; +use common::logging::{init_log_logger, validate_log_entry, MultiNodeLogger, TestLogWriter}; use common::{ bump_fee_and_broadcast, distribute_funds_unconfirmed, do_channel_full_cycle, expect_channel_pending_event, expect_channel_ready_event, expect_event, expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, - generate_blocks_and_wait, - logging::MultiNodeLogger, - logging::{init_log_logger, validate_log_entry, TestLogWriter}, - open_channel, open_channel_push_amt, premine_and_distribute_funds, premine_blocks, prepare_rbf, - random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, - setup_node, setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, - TestSyncStore, + generate_blocks_and_wait, open_channel, open_channel_push_amt, premine_and_distribute_funds, + premine_blocks, prepare_rbf, random_config, random_listening_addresses, + setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_node_for_async_payments, + setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, }; - use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ @@ -27,25 +32,14 @@ use ldk_node::payment::{ QrPaymentResult, }; use ldk_node::{Builder, Event, NodeError}; - use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::routing::router::RouteParametersConfig; use lightning::util::persist::KVStoreSync; - use lightning_invoice::{Bolt11InvoiceDescription, Description}; use 
lightning_types::payment::{PaymentHash, PaymentPreimage}; - -use bitcoin::address::NetworkUnchecked; -use bitcoin::hashes::sha256::Hash as Sha256Hash; -use bitcoin::hashes::Hash; -use bitcoin::{Address, Amount, ScriptBuf}; use log::LevelFilter; -use std::collections::HashSet; -use std::str::FromStr; -use std::sync::Arc; - #[test] fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 9d6ec158c..bdd876003 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -9,9 +9,10 @@ mod common; -use ldk_node::Builder; use std::collections::HashMap; +use ldk_node::Builder; + #[test] fn channel_full_cycle_with_vss_store() { let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); diff --git a/tests/reorg_test.rs b/tests/reorg_test.rs index 707b67e88..03ace908f 100644 --- a/tests/reorg_test.rs +++ b/tests/reorg_test.rs @@ -1,9 +1,11 @@ mod common; +use std::collections::HashMap; + use bitcoin::Amount; use ldk_node::payment::{PaymentDirection, PaymentKind}; use ldk_node::{Event, LightningBalance, PendingSweepBalance}; -use proptest::{prelude::prop, proptest}; -use std::collections::HashMap; +use proptest::prelude::prop; +use proptest::proptest; use crate::common::{ expect_event, generate_blocks_and_wait, invalidate_blocks, open_channel, From 11b7eb5f8976da4118186ea1e815d03938cce279 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 22 Sep 2025 18:11:36 +0200 Subject: [PATCH 084/184] Use cancellable task for inbound connections So that we'll cleanly signal and wait for termination. 
--- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 0f547ce1d..a8f2f87eb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -313,6 +313,7 @@ impl Node { bind_addrs.extend(resolved_address); } + let runtime = Arc::clone(&self.runtime); self.runtime.spawn_cancellable_background_task(async move { { let listener = @@ -338,7 +339,7 @@ impl Node { } res = listener.accept() => { let tcp_stream = res.unwrap().0; - tokio::spawn(async move { + runtime.spawn_cancellable_background_task(async move { lightning_net_tokio::setup_inbound( Arc::clone(&peer_mgr), tcp_stream.into_std().unwrap(), From 6f1edcdb8cf842aad519333fc684e9507df8dc6d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 22 Sep 2025 14:38:32 +0200 Subject: [PATCH 085/184] Listen on all provided addresses Previously ldk-node would stop binding after the first successful bind to an address. --- bindings/ldk_node.udl | 1 - src/builder.rs | 3 - src/lib.rs | 97 ++++++++++++++++++--------------- tests/integration_tests_rust.rs | 24 +++++--- 4 files changed, 67 insertions(+), 58 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index a6d867e5a..bd1e4fc43 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -325,7 +325,6 @@ enum NodeError { dictionary NodeStatus { boolean is_running; - boolean is_listening; BestBlock current_best_block; u64? latest_lightning_wallet_sync_timestamp; u64? latest_onchain_wallet_sync_timestamp; diff --git a/src/builder.rs b/src/builder.rs index cf414ec57..0b3ea3101 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -9,7 +9,6 @@ use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; use std::path::PathBuf; -use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex, Once, RwLock}; use std::time::SystemTime; use std::{fmt, fs}; @@ -1133,7 +1132,6 @@ fn build_with_store_internal( } // Initialize the status fields.
- let is_listening = Arc::new(AtomicBool::new(false)); let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) { Ok(metrics) => Arc::new(RwLock::new(metrics)), Err(e) => { @@ -1734,7 +1732,6 @@ fn build_with_store_internal( peer_store, payment_store, is_running, - is_listening, node_metrics, om_mailbox, async_payments_role, diff --git a/src/lib.rs b/src/lib.rs index a8f2f87eb..a075cfac5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -101,7 +101,6 @@ mod wallet; use std::default::Default; use std::net::ToSocketAddrs; -use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -189,7 +188,6 @@ pub struct Node { peer_store: Arc>>, payment_store: Arc, is_running: Arc>, - is_listening: Arc, node_metrics: Arc>, om_mailbox: Option>, async_payments_role: Option, @@ -293,9 +291,7 @@ impl Node { if let Some(listening_addresses) = &self.config.listening_addresses { // Setup networking let peer_manager_connection_handler = Arc::clone(&self.peer_manager); - let mut stop_listen = self.stop_sender.subscribe(); let listening_logger = Arc::clone(&self.logger); - let listening_indicator = Arc::clone(&self.is_listening); let mut bind_addrs = Vec::with_capacity(listening_addresses.len()); @@ -313,46 +309,62 @@ impl Node { bind_addrs.extend(resolved_address); } - let runtime = Arc::clone(&self.runtime); - self.runtime.spawn_cancellable_background_task(async move { - { - let listener = - tokio::net::TcpListener::bind(&*bind_addrs).await - .unwrap_or_else(|e| { - log_error!(listening_logger, "Failed to bind to listen addresses/ports - is something else already listening on it?: {}", e); - panic!( - "Failed to bind to listen address/port - is something else already listening on it?", - ); - }); - - listening_indicator.store(true, Ordering::Release); - - loop { - let peer_mgr = Arc::clone(&peer_manager_connection_handler); - tokio::select! 
{ - _ = stop_listen.changed() => { - log_debug!( - listening_logger, - "Stopping listening to inbound connections." + let logger = Arc::clone(&listening_logger); + let listeners = self.runtime.block_on(async move { + let mut listeners = Vec::new(); + + // Try to bind to all addresses + for addr in &*bind_addrs { + match tokio::net::TcpListener::bind(addr).await { + Ok(listener) => { + log_trace!(logger, "Listener bound to {}", addr); + listeners.push(listener); + }, + Err(e) => { + log_error!( + logger, + "Failed to bind to {}: {} - is something else already listening?", + addr, + e ); - break; - } - res = listener.accept() => { - let tcp_stream = res.unwrap().0; - runtime.spawn_cancellable_background_task(async move { - lightning_net_tokio::setup_inbound( - Arc::clone(&peer_mgr), - tcp_stream.into_std().unwrap(), - ) - .await; - }); - } + return Err(Error::InvalidSocketAddress); + }, } } - } - listening_indicator.store(false, Ordering::Release); - }); + Ok(listeners) + })?; + + for listener in listeners { + let logger = Arc::clone(&listening_logger); + let peer_mgr = Arc::clone(&peer_manager_connection_handler); + let mut stop_listen = self.stop_sender.subscribe(); + let runtime = Arc::clone(&self.runtime); + self.runtime.spawn_cancellable_background_task(async move { + loop { + tokio::select! { + _ = stop_listen.changed() => { + log_debug!( + logger, + "Stopping listening to inbound connections." + ); + break; + } + res = listener.accept() => { + let tcp_stream = res.unwrap().0; + let peer_mgr = Arc::clone(&peer_mgr); + runtime.spawn_cancellable_background_task(async move { + lightning_net_tokio::setup_inbound( + Arc::clone(&peer_mgr), + tcp_stream.into_std().unwrap(), + ) + .await; + }); + } + } + } + }); + } } // Regularly reconnect to persisted peers. @@ -667,7 +679,6 @@ impl Node { /// Returns the status of the [`Node`]. 
pub fn status(&self) -> NodeStatus { let is_running = *self.is_running.read().unwrap(); - let is_listening = self.is_listening.load(Ordering::Acquire); let current_best_block = self.channel_manager.current_best_block().into(); let locked_node_metrics = self.node_metrics.read().unwrap(); let latest_lightning_wallet_sync_timestamp = @@ -685,7 +696,6 @@ impl Node { NodeStatus { is_running, - is_listening, current_best_block, latest_lightning_wallet_sync_timestamp, latest_onchain_wallet_sync_timestamp, @@ -1496,9 +1506,6 @@ impl Drop for Node { pub struct NodeStatus { /// Indicates whether the [`Node`] is running. pub is_running: bool, - /// Indicates whether the [`Node`] is listening for incoming connections on the addresses - /// configured via [`Config::listening_addresses`]. - pub is_listening: bool, /// The best block to which our Lightning wallet is currently synced. pub current_best_block: BestBlock, /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 0db30ea1c..cca52ae2d 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -817,6 +817,21 @@ fn sign_verify_msg() { assert!(node.verify_signature(msg, sig.as_str(), &pkey)); } +#[test] +fn connection_multi_listen() { + let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); + + let node_id_b = node_b.node_id(); + + let node_addrs_b = node_b.listening_addresses().unwrap(); + for node_addr_b in &node_addrs_b { + node_a.connect(node_id_b, node_addr_b.clone(), false).unwrap(); + node_a.disconnect(node_id_b).unwrap(); + } +} + #[test] fn connection_restart_behavior() { do_connection_restart_behavior(true); @@ -832,11 +847,6 @@ fn do_connection_restart_behavior(persist: bool) { let node_id_b = node_b.node_id(); let node_addr_b = 
node_b.listening_addresses().unwrap().first().unwrap().clone(); - - while !node_b.status().is_listening { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - node_a.connect(node_id_b, node_addr_b, persist).unwrap(); let peer_details_a = node_a.list_peers().first().unwrap().clone(); @@ -886,10 +896,6 @@ fn concurrent_connections_succeed() { let node_id_b = node_b.node_id(); let node_addr_b = node_b.listening_addresses().unwrap().first().unwrap().clone(); - while !node_b.status().is_listening { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - let mut handles = Vec::new(); for _ in 0..10 { let thread_node = Arc::clone(&node_a); From 0f621ff117e9a144ff790ea6172d5c202132a5c7 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 23 Sep 2025 10:26:47 +0200 Subject: [PATCH 086/184] Do not use random ports from ephemeral range To avoid conflicts ("port in use") with ports that were used for outgoing connections and are now in the TIME_WAIT state. --- tests/common/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 98c96e307..1331fc047 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -195,7 +195,7 @@ pub(crate) fn random_storage_path() -> PathBuf { pub(crate) fn random_port() -> u16 { let mut rng = thread_rng(); - rng.gen_range(5000..65535) + rng.gen_range(5000..32768) } pub(crate) fn random_listening_addresses() -> Vec { From 52a5f9aa1bbf7058455240818596cd4d48e62e8c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Sep 2025 09:58:56 +0200 Subject: [PATCH 087/184] Move current VSS `KVStoreSync` logic to `_internal` methods .. first step to make review easier. 
--- src/io/vss_store.rs | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index a03aafc44..64143dd42 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -126,10 +126,8 @@ impl VssStore { } Ok(keys) } -} -impl KVStoreSync for VssStore { - fn read( + fn read_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; @@ -160,7 +158,7 @@ impl KVStoreSync for VssStore { Ok(self.storable_builder.deconstruct(storable)?.0) } - fn write( + fn write_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; @@ -188,7 +186,7 @@ impl KVStoreSync for VssStore { Ok(()) } - fn remove( + fn remove_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; @@ -211,7 +209,9 @@ impl KVStoreSync for VssStore { Ok(()) } - fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + fn list_internal( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; let keys = self @@ -229,6 +229,30 @@ impl KVStoreSync for VssStore { } } +impl KVStoreSync for VssStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + self.read_internal(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + self.write_internal(primary_namespace, secondary_namespace, key, buf) + } + + fn 
remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + self.remove_internal(primary_namespace, secondary_namespace, key, lazy) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + self.list_internal(primary_namespace, secondary_namespace) + } +} + fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32], [u8; 32]) { let hkdf = |initial_key_material: &[u8], salt: &[u8]| -> [u8; 32] { let mut engine = HmacEngine::::new(salt); From bc313f94bf1190b44bda0eaa6151bda8c719e8d9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Sep 2025 10:03:30 +0200 Subject: [PATCH 088/184] Make VSS internal methods `async`, move `block_on` to `impl KVStoreSync` .. as we're gonna reuse the `async` `_internal` methods shortly. --- src/io/vss_store.rs | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 64143dd42..02cb54e78 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -127,7 +127,7 @@ impl VssStore { Ok(keys) } - fn read_internal( + async fn read_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; @@ -135,7 +135,7 @@ impl VssStore { store_id: self.store_id.clone(), key: self.build_key(primary_namespace, secondary_namespace, key)?, }; - let resp = self.runtime.block_on(self.client.get_object(&request)).map_err(|e| { + let resp = self.client.get_object(&request).await.map_err(|e| { let msg = format!( "Failed to read from key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e @@ -145,6 +145,7 @@ impl VssStore { _ => Error::new(ErrorKind::Other, msg), } })?; + // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to 
[`VssError::InternalServerError`] in [`VssClient`] let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { @@ -158,7 +159,7 @@ impl VssStore { Ok(self.storable_builder.deconstruct(storable)?.0) } - fn write_internal( + async fn write_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; @@ -175,7 +176,7 @@ impl VssStore { delete_items: vec![], }; - self.runtime.block_on(self.client.put_object(&request)).map_err(|e| { + self.client.put_object(&request).await.map_err(|e| { let msg = format!( "Failed to write to key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e @@ -186,7 +187,7 @@ impl VssStore { Ok(()) } - fn remove_internal( + async fn remove_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; @@ -199,25 +200,24 @@ impl VssStore { }), }; - self.runtime.block_on(self.client.delete_object(&request)).map_err(|e| { + self.client.delete_object(&request).await.map_err(|e| { let msg = format!( "Failed to delete key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e ); Error::new(ErrorKind::Other, msg) })?; + Ok(()) } - fn list_internal( + async fn list_internal( &self, primary_namespace: &str, secondary_namespace: &str, ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; - let keys = self - .runtime - .block_on(self.list_all_keys(primary_namespace, secondary_namespace)) - .map_err(|e| { + let keys = + self.list_all_keys(primary_namespace, secondary_namespace).await.map_err(|e| { let msg = format!( "Failed to retrieve keys in namespace: {}/{} : {}", primary_namespace, secondary_namespace, e @@ -233,23 +233,27 @@ impl KVStoreSync for VssStore { fn read( &self, 
primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { - self.read_internal(primary_namespace, secondary_namespace, key) + let fut = self.read_internal(primary_namespace, secondary_namespace, key); + self.runtime.block_on(fut) } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { - self.write_internal(primary_namespace, secondary_namespace, key, buf) + let fut = self.write_internal(primary_namespace, secondary_namespace, key, buf); + self.runtime.block_on(fut) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { - self.remove_internal(primary_namespace, secondary_namespace, key, lazy) + let fut = self.remove_internal(primary_namespace, secondary_namespace, key, lazy); + self.runtime.block_on(fut) } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { - self.list_internal(primary_namespace, secondary_namespace) + let fut = self.list_internal(primary_namespace, secondary_namespace); + self.runtime.block_on(fut) } } From 2b40a8064077246c198afb530af36318033f13c8 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Sep 2025 10:40:22 +0200 Subject: [PATCH 089/184] Split `VssStore` into `VssStore` and `VssStoreInner` .. where the former holds the latter in an `Arc` that can be used in async/`Future` contexts more easily. --- src/io/vss_store.rs | 78 ++++++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 32 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 02cb54e78..b85477f5e 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -41,17 +41,59 @@ type CustomRetryPolicy = FilteredRetryPolicy< /// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend. 
pub struct VssStore { + inner: Arc, + runtime: Arc, +} + +impl VssStore { + pub(crate) fn new( + base_url: String, store_id: String, vss_seed: [u8; 32], + header_provider: Arc, runtime: Arc, + ) -> Self { + let inner = Arc::new(VssStoreInner::new(base_url, store_id, vss_seed, header_provider)); + Self { inner, runtime } + } +} + +impl KVStoreSync for VssStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + let fut = self.inner.read_internal(primary_namespace, secondary_namespace, key); + self.runtime.block_on(fut) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + let fut = self.inner.write_internal(primary_namespace, secondary_namespace, key, buf); + self.runtime.block_on(fut) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + let fut = self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy); + self.runtime.block_on(fut) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + let fut = self.inner.list_internal(primary_namespace, secondary_namespace); + self.runtime.block_on(fut) + } +} + +struct VssStoreInner { client: VssClient, store_id: String, - runtime: Arc, storable_builder: StorableBuilder, key_obfuscator: KeyObfuscator, } -impl VssStore { +impl VssStoreInner { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], - header_provider: Arc, runtime: Arc, + header_provider: Arc, ) -> Self { let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); @@ -71,7 +113,7 @@ impl VssStore { }) as _); let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); - Self { client, store_id, runtime, storable_builder, key_obfuscator } + Self { client, store_id, storable_builder, key_obfuscator } } fn build_key( @@ 
-229,34 +271,6 @@ impl VssStore { } } -impl KVStoreSync for VssStore { - fn read( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> io::Result> { - let fut = self.read_internal(primary_namespace, secondary_namespace, key); - self.runtime.block_on(fut) - } - - fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> io::Result<()> { - let fut = self.write_internal(primary_namespace, secondary_namespace, key, buf); - self.runtime.block_on(fut) - } - - fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> io::Result<()> { - let fut = self.remove_internal(primary_namespace, secondary_namespace, key, lazy); - self.runtime.block_on(fut) - } - - fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { - let fut = self.list_internal(primary_namespace, secondary_namespace); - self.runtime.block_on(fut) - } -} - fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32], [u8; 32]) { let hkdf = |initial_key_material: &[u8], salt: &[u8]| -> [u8; 32] { let mut engine = HmacEngine::::new(salt); From 679b6d3e4167484bc5b19066df40b3771814808a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 26 Sep 2025 09:12:52 +0200 Subject: [PATCH 090/184] Refactor infallible `build_key` to not return an error --- src/io/vss_store.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index b85477f5e..7c3ccdd86 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -116,14 +116,12 @@ impl VssStoreInner { Self { client, store_id, storable_builder, key_obfuscator } } - fn build_key( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> io::Result { + fn build_key(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> String { let obfuscated_key = self.key_obfuscator.obfuscate(key); if 
primary_namespace.is_empty() { - Ok(obfuscated_key) + obfuscated_key } else { - Ok(format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key)) + format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key) } } @@ -175,7 +173,7 @@ impl VssStoreInner { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; let request = GetObjectRequest { store_id: self.store_id.clone(), - key: self.build_key(primary_namespace, secondary_namespace, key)?, + key: self.build_key(primary_namespace, secondary_namespace, key), }; let resp = self.client.get_object(&request).await.map_err(|e| { let msg = format!( @@ -211,7 +209,7 @@ impl VssStoreInner { store_id: self.store_id.clone(), global_version: None, transaction_items: vec![KeyValue { - key: self.build_key(primary_namespace, secondary_namespace, key)?, + key: self.build_key(primary_namespace, secondary_namespace, key), version, value: storable.encode_to_vec(), }], @@ -236,7 +234,7 @@ impl VssStoreInner { let request = DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(KeyValue { - key: self.build_key(primary_namespace, secondary_namespace, key)?, + key: self.build_key(primary_namespace, secondary_namespace, key), version: -1, value: vec![], }), From 2b117f5584548f40ad024dfe54ae3c6c739cfeab Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Sep 2025 10:47:46 +0200 Subject: [PATCH 091/184] Implement `KVStore` for `VssStore` We implement the async `KVStore` trait for `VssStore`. --- src/io/vss_store.rs | 283 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 234 insertions(+), 49 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 7c3ccdd86..b71d4144e 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -5,16 +5,22 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+use std::boxed::Box; +use std::collections::HashMap; +use std::future::Future; #[cfg(test)] use std::panic::RefUnwindSafe; -use std::sync::Arc; +use std::pin::Pin; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; use std::time::Duration; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::io::{self, Error, ErrorKind}; -use lightning::util::persist::KVStoreSync; +use lightning::util::persist::{KVStore, KVStoreSync}; use prost::Message; use rand::RngCore; +use tokio::sync::RwLock; use vss_client::client::VssClient; use vss_client::error::VssError; use vss_client::headers::VssHeaderProvider; @@ -42,6 +48,9 @@ type CustomRetryPolicy = FilteredRetryPolicy< /// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend. pub struct VssStore { inner: Arc, + // Version counter to ensure that writes are applied in the correct order. It is assumed that read and list + // operations aren't sensitive to the order of execution. 
+ next_version: AtomicU64, runtime: Arc, } @@ -51,7 +60,32 @@ impl VssStore { header_provider: Arc, runtime: Arc, ) -> Self { let inner = Arc::new(VssStoreInner::new(base_url, store_id, vss_seed, header_provider)); - Self { inner, runtime } + let next_version = AtomicU64::new(1); + Self { inner, next_version, runtime } + } + + // Same logic as for the obfuscated keys below, but just for locking, using the plaintext keys + fn build_locking_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { + if primary_namespace.is_empty() { + key.to_owned() + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, key) + } + } + + fn get_new_version_and_lock_ref(&self, locking_key: String) -> (Arc>, u64) { + let version = self.next_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("VssStore version counter overflowed"); + } + + // Get a reference to the inner lock. We do this early so that the arc can double as an in-flight counter for + // cleaning up unused locks. 
+ let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key); + + (inner_lock_ref, version) } } @@ -66,14 +100,34 @@ impl KVStoreSync for VssStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { - let fut = self.inner.write_internal(primary_namespace, secondary_namespace, key, buf); + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let fut = self.inner.write_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ); self.runtime.block_on(fut) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { - let fut = self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy); + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let fut = self.inner.remove_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + lazy, + ); self.runtime.block_on(fut) } @@ -83,11 +137,82 @@ impl KVStoreSync for VssStore { } } +impl KVStore for VssStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + Box::pin(async move { + inner.read_internal(&primary_namespace, &secondary_namespace, &key).await + }) + } + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + Send>> { + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, 
version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + Box::pin(async move { + inner + .write_internal( + inner_lock_ref, + locking_key, + version, + &primary_namespace, + &secondary_namespace, + &key, + buf, + ) + .await + }) + } + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Pin> + Send>> { + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + Box::pin(async move { + inner + .remove_internal( + inner_lock_ref, + locking_key, + version, + &primary_namespace, + &secondary_namespace, + &key, + lazy, + ) + .await + }) + } + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let inner = Arc::clone(&self.inner); + Box::pin(async move { inner.list_internal(&primary_namespace, &secondary_namespace).await }) + } +} + struct VssStoreInner { client: VssClient, store_id: String, storable_builder: StorableBuilder, key_obfuscator: KeyObfuscator, + // Per-key locks that ensures that we don't have concurrent writes to the same namespace/key. + // The lock also encapsulates the latest written version per key. 
+ locks: Mutex>>>, } impl VssStoreInner { @@ -113,10 +238,18 @@ impl VssStoreInner { }) as _); let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); - Self { client, store_id, storable_builder, key_obfuscator } + let locks = Mutex::new(HashMap::new()); + Self { client, store_id, storable_builder, key_obfuscator, locks } } - fn build_key(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> String { + fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = self.locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(locking_key).or_default()) + } + + fn build_obfuscated_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { let obfuscated_key = self.key_obfuscator.obfuscate(key); if primary_namespace.is_empty() { obfuscated_key @@ -171,10 +304,9 @@ impl VssStoreInner { &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; - let request = GetObjectRequest { - store_id: self.store_id.clone(), - key: self.build_key(primary_namespace, secondary_namespace, key), - }; + + let obfuscated_key = self.build_obfuscated_key(primary_namespace, secondary_namespace, key); + let request = GetObjectRequest { store_id: self.store_id.clone(), key: obfuscated_key }; let resp = self.client.get_object(&request).await.map_err(|e| { let msg = format!( "Failed to read from key {}/{}/{}: {}", @@ -200,55 +332,65 @@ impl VssStoreInner { } async fn write_internal( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; - let version = -1; - let storable = self.storable_builder.build(buf, 
version); - let request = PutObjectRequest { - store_id: self.store_id.clone(), - global_version: None, - transaction_items: vec![KeyValue { - key: self.build_key(primary_namespace, secondary_namespace, key), - version, - value: storable.encode_to_vec(), - }], - delete_items: vec![], - }; - self.client.put_object(&request).await.map_err(|e| { - let msg = format!( - "Failed to write to key {}/{}/{}: {}", - primary_namespace, secondary_namespace, key, e - ); - Error::new(ErrorKind::Other, msg) - })?; + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { + let obfuscated_key = + self.build_obfuscated_key(primary_namespace, secondary_namespace, key); + let vss_version = -1; + let storable = self.storable_builder.build(buf, vss_version); + let request = PutObjectRequest { + store_id: self.store_id.clone(), + global_version: None, + transaction_items: vec![KeyValue { + key: obfuscated_key, + version: vss_version, + value: storable.encode_to_vec(), + }], + delete_items: vec![], + }; - Ok(()) + self.client.put_object(&request).await.map_err(|e| { + let msg = format!( + "Failed to write to key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + Error::new(ErrorKind::Other, msg) + })?; + + Ok(()) + }) + .await } async fn remove_internal( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; - let request = DeleteObjectRequest { - store_id: self.store_id.clone(), - key_value: Some(KeyValue { - key: self.build_key(primary_namespace, secondary_namespace, key), - version: -1, - value: vec![], - }), - }; - self.client.delete_object(&request).await.map_err(|e| { - let msg = format!( - "Failed to delete key {}/{}/{}: {}", - primary_namespace, 
secondary_namespace, key, e - ); - Error::new(ErrorKind::Other, msg) - })?; + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { + let obfuscated_key = + self.build_obfuscated_key(primary_namespace, secondary_namespace, key); + let request = DeleteObjectRequest { + store_id: self.store_id.clone(), + key_value: Some(KeyValue { key: obfuscated_key, version: -1, value: vec![] }), + }; - Ok(()) + self.client.delete_object(&request).await.map_err(|e| { + let msg = format!( + "Failed to delete key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + Error::new(ErrorKind::Other, msg) + })?; + + Ok(()) + }) + .await } async fn list_internal( @@ -267,6 +409,49 @@ impl VssStoreInner { Ok(keys) } + + async fn execute_locked_write< + F: Future>, + FN: FnOnce() -> F, + >( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: FN, + ) -> Result<(), lightning::io::Error> { + let res = { + let mut last_written_version = inner_lock_ref.write().await; + + // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual + // consistency. + let is_stale_version = version <= *last_written_version; + + // If the version is not stale, we execute the callback. Otherwise we can and must skip writing. + if is_stale_version { + Ok(()) + } else { + callback().await.map(|_| { + *last_written_version = version; + }) + } + }; + + self.clean_locks(&inner_lock_ref, locking_key); + + res + } + + fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { + // If there no arcs in use elsewhere, this means that there are no in-flight writes. We can remove the map entry + // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in + // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already + // counted. 
+ let mut outer_lock = self.locks.lock().unwrap(); + + let strong_count = Arc::strong_count(&inner_lock_ref); + debug_assert!(strong_count >= 2, "Unexpected VssStore strong count"); + + if strong_count == 2 { + outer_lock.remove(&locking_key); + } + } } fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32], [u8; 32]) { From ce731688acc24c70266349fd57e383ade5d9590b Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 7 Oct 2025 10:42:38 +0200 Subject: [PATCH 092/184] f No need to use `RwLock` if we ever only `write` it --- src/io/vss_store.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index b71d4144e..c5442d1ac 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -20,7 +20,6 @@ use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::{KVStore, KVStoreSync}; use prost::Message; use rand::RngCore; -use tokio::sync::RwLock; use vss_client::client::VssClient; use vss_client::error::VssError; use vss_client::headers::VssHeaderProvider; @@ -75,7 +74,9 @@ impl VssStore { } } - fn get_new_version_and_lock_ref(&self, locking_key: String) -> (Arc>, u64) { + fn get_new_version_and_lock_ref( + &self, locking_key: String, + ) -> (Arc>, u64) { let version = self.next_version.fetch_add(1, Ordering::Relaxed); if version == u64::MAX { panic!("VssStore version counter overflowed"); @@ -212,7 +213,7 @@ struct VssStoreInner { key_obfuscator: KeyObfuscator, // Per-key locks that ensures that we don't have concurrent writes to the same namespace/key. // The lock also encapsulates the latest written version per key. 
- locks: Mutex>>>, + locks: Mutex>>>, } impl VssStoreInner { @@ -242,7 +243,7 @@ impl VssStoreInner { Self { client, store_id, storable_builder, key_obfuscator, locks } } - fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { + fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { let mut outer_lock = self.locks.lock().unwrap(); Arc::clone(&outer_lock.entry(locking_key).or_default()) } @@ -332,7 +333,7 @@ impl VssStoreInner { } async fn write_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; @@ -367,7 +368,7 @@ impl VssStoreInner { } async fn remove_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; @@ -414,10 +415,11 @@ impl VssStoreInner { F: Future>, FN: FnOnce() -> F, >( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: FN, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + callback: FN, ) -> Result<(), lightning::io::Error> { let res = { - let mut last_written_version = inner_lock_ref.write().await; + let mut last_written_version = inner_lock_ref.lock().await; // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual // consistency. @@ -438,7 +440,7 @@ impl VssStoreInner { res } - fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { + fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { // If there no arcs in use elsewhere, this means that there are no in-flight writes. 
We can remove the map entry // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already From 76c75fe590495f9ac62fd6196df3402d0d168e5a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Sep 2025 11:00:04 +0200 Subject: [PATCH 093/184] Move `SqliteStore` logic to `_internal` methods .. to be easier reusable via `KVStore` also --- src/io/sqlite_store/mod.rs | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index d18c7440d..582dd831b 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -126,10 +126,8 @@ impl SqliteStore { pub fn get_data_dir(&self) -> PathBuf { self.data_dir.clone() } -} -impl KVStoreSync for SqliteStore { - fn read( + fn read_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; @@ -177,7 +175,7 @@ impl KVStoreSync for SqliteStore { Ok(res) } - fn write( + fn write_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; @@ -213,7 +211,7 @@ impl KVStoreSync for SqliteStore { }) } - fn remove( + fn remove_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; @@ -245,7 +243,9 @@ impl KVStoreSync for SqliteStore { Ok(()) } - fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + fn list_internal( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { 
check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; let locked_conn = self.connection.lock().unwrap(); @@ -285,6 +285,30 @@ impl KVStoreSync for SqliteStore { } } +impl KVStoreSync for SqliteStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + self.read_internal(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + self.write_internal(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + self.remove_internal(primary_namespace, secondary_namespace, key, lazy) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + self.list_internal(primary_namespace, secondary_namespace) + } +} + #[cfg(test)] mod tests { use super::*; From c0473cc98da39df40954242a543277578d8cc6b5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Sep 2025 11:18:42 +0200 Subject: [PATCH 094/184] Split `SqliteStore` into `SqliteStore` and `SqliteStoreInner` .. where the former holds the latter in an `Arc` that can be used in async/`Future` contexts more easily. --- src/io/sqlite_store/mod.rs | 79 ++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 33 deletions(-) diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 582dd831b..2ab5c11a6 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -37,9 +37,7 @@ const SCHEMA_USER_VERSION: u16 = 2; /// /// [SQLite]: https://sqlite.org pub struct SqliteStore { - connection: Arc>, - data_dir: PathBuf, - kv_table_name: String, + inner: Arc, } impl SqliteStore { @@ -51,6 +49,50 @@ impl SqliteStore { /// Similarly, the given `kv_table_name` will be used or default to [`DEFAULT_KV_TABLE_NAME`]. 
pub fn new( data_dir: PathBuf, db_file_name: Option, kv_table_name: Option, + ) -> io::Result { + let inner = Arc::new(SqliteStoreInner::new(data_dir, db_file_name, kv_table_name)?); + Ok(Self { inner }) + } + + /// Returns the data directory. + pub fn get_data_dir(&self) -> PathBuf { + self.inner.data_dir.clone() + } +} + +impl KVStoreSync for SqliteStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + self.inner.read_internal(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + self.inner.write_internal(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + self.inner.list_internal(primary_namespace, secondary_namespace) + } +} + +struct SqliteStoreInner { + connection: Arc>, + data_dir: PathBuf, + kv_table_name: String, +} + +impl SqliteStoreInner { + fn new( + data_dir: PathBuf, db_file_name: Option, kv_table_name: Option, ) -> io::Result { let db_file_name = db_file_name.unwrap_or(DEFAULT_SQLITE_DB_FILE_NAME.to_string()); let kv_table_name = kv_table_name.unwrap_or(DEFAULT_KV_TABLE_NAME.to_string()); @@ -122,11 +164,6 @@ impl SqliteStore { Ok(Self { connection, data_dir, kv_table_name }) } - /// Returns the data directory. 
- pub fn get_data_dir(&self) -> PathBuf { - self.data_dir.clone() - } - fn read_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { @@ -285,30 +322,6 @@ impl SqliteStore { } } -impl KVStoreSync for SqliteStore { - fn read( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> io::Result> { - self.read_internal(primary_namespace, secondary_namespace, key) - } - - fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> io::Result<()> { - self.write_internal(primary_namespace, secondary_namespace, key, buf) - } - - fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> io::Result<()> { - self.remove_internal(primary_namespace, secondary_namespace, key, lazy) - } - - fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { - self.list_internal(primary_namespace, secondary_namespace) - } -} - #[cfg(test)] mod tests { use super::*; @@ -318,7 +331,7 @@ mod tests { impl Drop for SqliteStore { fn drop(&mut self) { - match fs::remove_dir_all(&self.data_dir) { + match fs::remove_dir_all(&self.inner.data_dir) { Err(e) => println!("Failed to remove test store directory: {}", e), _ => {}, } From 5c09a31d9d2c00529110537182222063753ea596 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 7 Oct 2025 11:13:06 +0200 Subject: [PATCH 095/184] Bump BDK to 2.2, rust-bitcoin to 0.32.7 --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b639b7dc1..30864e917 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,12 +79,12 @@ lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} bdk_electrum = { version = "0.23.0", default-features = 
false, features = ["use-rustls-ring"]} -bdk_wallet = { version = "2.0.0", default-features = false, features = ["std", "keys-bip39"]} +bdk_wallet = { version = "2.2.0", default-features = false, features = ["std", "keys-bip39"]} reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } rustls = { version = "0.23", default-features = false } rusqlite = { version = "0.31.0", features = ["bundled"] } -bitcoin = "0.32.4" +bitcoin = "0.32.7" bip39 = "2.0.0" bip21 = { version = "0.5", features = ["std"], default-features = false } From 496552b108340e9263d9ebed220826b5b00ef41a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 12 Sep 2025 11:42:47 +0200 Subject: [PATCH 096/184] Implement `KVStore` for `SqliteStore` --- src/io/sqlite_store/mod.rs | 301 ++++++++++++++++++++++++++++++------- 1 file changed, 248 insertions(+), 53 deletions(-) diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 2ab5c11a6..6ba41f714 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -6,12 +6,17 @@ // accordance with one or both of these licenses. //! Objects related to [`SqliteStore`] live here. +use std::boxed::Box; +use std::collections::HashMap; use std::fs; +use std::future::Future; use std::path::PathBuf; +use std::pin::Pin; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use lightning::io; -use lightning::util::persist::KVStoreSync; +use lightning::util::persist::{KVStore, KVStoreSync}; use lightning_types::string::PrintableString; use rusqlite::{named_params, Connection}; @@ -38,6 +43,10 @@ const SCHEMA_USER_VERSION: u16 = 2; /// [SQLite]: https://sqlite.org pub struct SqliteStore { inner: Arc, + + // Version counter to ensure that writes are applied in the correct order. It is assumed that read and list + // operations aren't sensitive to the order of execution. 
+ next_write_version: AtomicU64, } impl SqliteStore { @@ -51,7 +60,27 @@ impl SqliteStore { data_dir: PathBuf, db_file_name: Option, kv_table_name: Option, ) -> io::Result { let inner = Arc::new(SqliteStoreInner::new(data_dir, db_file_name, kv_table_name)?); - Ok(Self { inner }) + let next_write_version = AtomicU64::new(1); + Ok(Self { inner, next_write_version }) + } + + fn build_locking_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { + format!("{}#{}#{}", primary_namespace, secondary_namespace, key) + } + + fn get_new_version_and_lock_ref(&self, locking_key: String) -> (Arc>, u64) { + let version = self.next_write_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("SqliteStore version counter overflowed"); + } + + // Get a reference to the inner lock. We do this early so that the arc can double as an in-flight counter for + // cleaning up unused locks. + let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key); + + (inner_lock_ref, version) } /// Returns the data directory. 
@@ -60,6 +89,99 @@ impl SqliteStore { } } +impl KVStore for SqliteStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.read_internal(&primary_namespace, &secondary_namespace, &key) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + Send>> { + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.write_internal( + inner_lock_ref, + locking_key, + version, + &primary_namespace, + &secondary_namespace, + &key, + buf, + ) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Pin> + Send>> { + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = 
key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.remove_internal( + inner_lock_ref, + locking_key, + version, + &primary_namespace, + &secondary_namespace, + &key, + lazy, + ) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.list_internal(&primary_namespace, &secondary_namespace) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } +} + impl KVStoreSync for SqliteStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, @@ -70,13 +192,33 @@ impl KVStoreSync for SqliteStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { - self.inner.write_internal(primary_namespace, secondary_namespace, key, buf) + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + self.inner.write_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { - self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy) + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, 
version) = self.get_new_version_and_lock_ref(locking_key.clone()); + self.inner.remove_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + lazy, + ) } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { @@ -88,6 +230,7 @@ struct SqliteStoreInner { connection: Arc>, data_dir: PathBuf, kv_table_name: String, + write_version_locks: Mutex>>>, } impl SqliteStoreInner { @@ -161,7 +304,13 @@ impl SqliteStoreInner { })?; let connection = Arc::new(Mutex::new(connection)); - Ok(Self { connection, data_dir, kv_table_name }) + let write_version_locks = Mutex::new(HashMap::new()); + Ok(Self { connection, data_dir, kv_table_name, write_version_locks }) + } + + fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = self.write_version_locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(locking_key).or_default()) } fn read_internal( @@ -213,71 +362,77 @@ impl SqliteStoreInner { } fn write_internal( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; - let locked_conn = self.connection.lock().unwrap(); + self.execute_locked_write(inner_lock_ref, locking_key, version, || { + let locked_conn = self.connection.lock().unwrap(); - let sql = format!( - "INSERT OR REPLACE INTO {} (primary_namespace, secondary_namespace, key, value) VALUES (:primary_namespace, :secondary_namespace, :key, :value);", - self.kv_table_name - ); + let sql = format!( + "INSERT OR REPLACE INTO {} (primary_namespace, secondary_namespace, key, value) VALUES (:primary_namespace, :secondary_namespace, :key, :value);", + self.kv_table_name + ); - let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { - let msg = 
format!("Failed to prepare statement: {}", e); - io::Error::new(io::ErrorKind::Other, msg) - })?; + let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { + let msg = format!("Failed to prepare statement: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; - stmt.execute(named_params! { - ":primary_namespace": primary_namespace, - ":secondary_namespace": secondary_namespace, - ":key": key, - ":value": buf, - }) - .map(|_| ()) - .map_err(|e| { - let msg = format!( - "Failed to write to key {}/{}/{}: {}", - PrintableString(primary_namespace), - PrintableString(secondary_namespace), - PrintableString(key), - e - ); - io::Error::new(io::ErrorKind::Other, msg) + stmt.execute(named_params! { + ":primary_namespace": primary_namespace, + ":secondary_namespace": secondary_namespace, + ":key": key, + ":value": buf, + }) + .map(|_| ()) + .map_err(|e| { + let msg = format!( + "Failed to write to key {}/{}/{}: {}", + PrintableString(primary_namespace), + PrintableString(secondary_namespace), + PrintableString(key), + e + ); + io::Error::new(io::ErrorKind::Other, msg) + }) }) } fn remove_internal( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; - let locked_conn = self.connection.lock().unwrap(); + self.execute_locked_write(inner_lock_ref, locking_key, version, || { + let locked_conn = self.connection.lock().unwrap(); - let sql = format!("DELETE FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); + let sql = format!("DELETE FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); - let mut stmt = 
locked_conn.prepare_cached(&sql).map_err(|e| { - let msg = format!("Failed to prepare statement: {}", e); - io::Error::new(io::ErrorKind::Other, msg) - })?; + let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { + let msg = format!("Failed to prepare statement: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; - stmt.execute(named_params! { - ":primary_namespace": primary_namespace, - ":secondary_namespace": secondary_namespace, - ":key": key, + stmt.execute(named_params! { + ":primary_namespace": primary_namespace, + ":secondary_namespace": secondary_namespace, + ":key": key, + }) + .map_err(|e| { + let msg = format!( + "Failed to delete key {}/{}/{}: {}", + PrintableString(primary_namespace), + PrintableString(secondary_namespace), + PrintableString(key), + e + ); + io::Error::new(io::ErrorKind::Other, msg) + })?; + Ok(()) }) - .map_err(|e| { - let msg = format!( - "Failed to delete key {}/{}/{}: {}", - PrintableString(primary_namespace), - PrintableString(secondary_namespace), - PrintableString(key), - e - ); - io::Error::new(io::ErrorKind::Other, msg) - })?; - Ok(()) } fn list_internal( @@ -320,6 +475,46 @@ impl SqliteStoreInner { Ok(keys) } + + fn execute_locked_write Result<(), lightning::io::Error>>( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: F, + ) -> Result<(), lightning::io::Error> { + let res = { + let mut last_written_version = inner_lock_ref.lock().unwrap(); + + // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual + // consistency. + let is_stale_version = version <= *last_written_version; + + // If the version is not stale, we execute the callback. Otherwise we can and must skip writing. 
+ if is_stale_version { + Ok(()) + } else { + callback().map(|_| { + *last_written_version = version; + }) + } + }; + + self.clean_locks(&inner_lock_ref, locking_key); + + res + } + + fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { + // If there no arcs in use elsewhere, this means that there are no in-flight writes. We can remove the map entry + // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in + // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already + // counted. + let mut outer_lock = self.write_version_locks.lock().unwrap(); + + let strong_count = Arc::strong_count(&inner_lock_ref); + debug_assert!(strong_count >= 2, "Unexpected SqliteStore strong count"); + + if strong_count == 2 { + outer_lock.remove(&locking_key); + } + } } #[cfg(test)] From f535b50e97da73e39f1f19b90b3a28228cdb9e9d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 18 Sep 2025 11:13:46 +0200 Subject: [PATCH 097/184] Move `TestStoreSync` logic to `_internal` methods .. 
to be easier reusable via `KVStore` also --- tests/common/mod.rs | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 1331fc047..3e1cf899f 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1242,10 +1242,8 @@ impl TestSyncStore { }, } } -} -impl KVStoreSync for TestSyncStore { - fn read( + fn read_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> lightning::io::Result> { let _guard = self.serializer.read().unwrap(); @@ -1270,7 +1268,7 @@ impl KVStoreSync for TestSyncStore { } } - fn write( + fn write_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); @@ -1299,7 +1297,7 @@ impl KVStoreSync for TestSyncStore { } } - fn remove( + fn remove_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); @@ -1327,10 +1325,36 @@ impl KVStoreSync for TestSyncStore { } } - fn list( + fn list_internal( &self, primary_namespace: &str, secondary_namespace: &str, ) -> lightning::io::Result> { let _guard = self.serializer.read().unwrap(); self.do_list(primary_namespace, secondary_namespace) } } + +impl KVStoreSync for TestSyncStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> lightning::io::Result> { + self.read_internal(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> lightning::io::Result<()> { + self.write_internal(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> lightning::io::Result<()> { + self.remove_internal(primary_namespace, secondary_namespace, 
key, lazy) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> lightning::io::Result> { + self.list_internal(primary_namespace, secondary_namespace) + } +} From b2fca59f65dbc9af56275fa756b013a1aa27c9e9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 18 Sep 2025 11:16:36 +0200 Subject: [PATCH 098/184] Split `TestSyncStore` into `TestSyncStore` and `TestSyncStoreInner` .. where the former holds the latter in an `Arc` that can be used in async/`Future` contexts more easily. --- tests/common/mod.rs | 67 ++++++++++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 28 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3e1cf899f..3e72a3df4 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1190,14 +1190,51 @@ pub(crate) fn do_channel_full_cycle( // A `KVStore` impl for testing purposes that wraps all our `KVStore`s and asserts their synchronicity. pub(crate) struct TestSyncStore { + inner: Arc, +} + +impl TestSyncStore { + pub(crate) fn new(dest_dir: PathBuf) -> Self { + let inner = Arc::new(TestSyncStoreInner::new(dest_dir)); + Self { inner } + } +} + +impl KVStoreSync for TestSyncStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> lightning::io::Result> { + self.inner.read_internal(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> lightning::io::Result<()> { + self.inner.write_internal(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> lightning::io::Result<()> { + self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> lightning::io::Result> { + self.inner.list_internal(primary_namespace, secondary_namespace) + } +} + 
+struct TestSyncStoreInner { serializer: RwLock<()>, test_store: TestStore, fs_store: FilesystemStore, sqlite_store: SqliteStore, } -impl TestSyncStore { - pub(crate) fn new(dest_dir: PathBuf) -> Self { +impl TestSyncStoreInner { + fn new(dest_dir: PathBuf) -> Self { let serializer = RwLock::new(()); let mut fs_dir = dest_dir.clone(); fs_dir.push("fs_store"); @@ -1332,29 +1369,3 @@ impl TestSyncStore { self.do_list(primary_namespace, secondary_namespace) } } - -impl KVStoreSync for TestSyncStore { - fn read( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> lightning::io::Result> { - self.read_internal(primary_namespace, secondary_namespace, key) - } - - fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> lightning::io::Result<()> { - self.write_internal(primary_namespace, secondary_namespace, key, buf) - } - - fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> lightning::io::Result<()> { - self.remove_internal(primary_namespace, secondary_namespace, key, lazy) - } - - fn list( - &self, primary_namespace: &str, secondary_namespace: &str, - ) -> lightning::io::Result> { - self.list_internal(primary_namespace, secondary_namespace) - } -} From 8aa68ff00605828570c11404936b818625ec41ea Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 18 Sep 2025 11:18:54 +0200 Subject: [PATCH 099/184] Implement `KVStore` for `TestSyncStore` --- tests/common/mod.rs | 137 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 121 insertions(+), 16 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3e72a3df4..817d0edc5 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -10,9 +10,12 @@ pub(crate) mod logging; +use std::boxed::Box; use std::collections::{HashMap, HashSet}; use std::env; +use std::future::Future; use std::path::PathBuf; +use std::pin::Pin; use std::sync::{Arc, RwLock}; use std::time::Duration; @@ -31,9 
+34,10 @@ use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance, }; +use lightning::io; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; -use lightning::util::persist::KVStoreSync; +use lightning::util::persist::{KVStore, KVStoreSync}; use lightning::util::test_utils::TestStore; use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_persister::fs_store::FilesystemStore; @@ -1200,6 +1204,76 @@ impl TestSyncStore { } } +impl KVStore for TestSyncStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.read_internal(&primary_namespace, &secondary_namespace, &key) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.write_internal(&primary_namespace, &secondary_namespace, &key, buf) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Pin> + Send>> { + 
let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.list_internal(&primary_namespace, &secondary_namespace) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("Failed to IO operation due join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } +} + impl KVStoreSync for TestSyncStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, @@ -1254,9 +1328,10 @@ impl TestSyncStoreInner { fn do_list( &self, primary_namespace: &str, secondary_namespace: &str, ) -> lightning::io::Result> { - let fs_res = self.fs_store.list(primary_namespace, secondary_namespace); - let sqlite_res = self.sqlite_store.list(primary_namespace, secondary_namespace); - let test_res = self.test_store.list(primary_namespace, secondary_namespace); + let fs_res = KVStoreSync::list(&self.fs_store, primary_namespace, secondary_namespace); + let sqlite_res = + KVStoreSync::list(&self.sqlite_store, primary_namespace, secondary_namespace); + let test_res = KVStoreSync::list(&self.test_store, primary_namespace, secondary_namespace); match fs_res { Ok(mut list) => { @@ -1285,9 +1360,11 @@ impl TestSyncStoreInner { ) -> lightning::io::Result> { let _guard = 
self.serializer.read().unwrap(); - let fs_res = self.fs_store.read(primary_namespace, secondary_namespace, key); - let sqlite_res = self.sqlite_store.read(primary_namespace, secondary_namespace, key); - let test_res = self.test_store.read(primary_namespace, secondary_namespace, key); + let fs_res = KVStoreSync::read(&self.fs_store, primary_namespace, secondary_namespace, key); + let sqlite_res = + KVStoreSync::read(&self.sqlite_store, primary_namespace, secondary_namespace, key); + let test_res = + KVStoreSync::read(&self.test_store, primary_namespace, secondary_namespace, key); match fs_res { Ok(read) => { @@ -1309,11 +1386,27 @@ impl TestSyncStoreInner { &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); - let fs_res = self.fs_store.write(primary_namespace, secondary_namespace, key, buf.clone()); - let sqlite_res = - self.sqlite_store.write(primary_namespace, secondary_namespace, key, buf.clone()); - let test_res = - self.test_store.write(primary_namespace, secondary_namespace, key, buf.clone()); + let fs_res = KVStoreSync::write( + &self.fs_store, + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); + let sqlite_res = KVStoreSync::write( + &self.sqlite_store, + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); + let test_res = KVStoreSync::write( + &self.test_store, + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); assert!(self .do_list(primary_namespace, secondary_namespace) @@ -1338,10 +1431,22 @@ impl TestSyncStoreInner { &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); - let fs_res = self.fs_store.remove(primary_namespace, secondary_namespace, key, lazy); - let sqlite_res = - self.sqlite_store.remove(primary_namespace, secondary_namespace, key, lazy); - let test_res = 
self.test_store.remove(primary_namespace, secondary_namespace, key, lazy); + let fs_res = + KVStoreSync::remove(&self.fs_store, primary_namespace, secondary_namespace, key, lazy); + let sqlite_res = KVStoreSync::remove( + &self.sqlite_store, + primary_namespace, + secondary_namespace, + key, + lazy, + ); + let test_res = KVStoreSync::remove( + &self.test_store, + primary_namespace, + secondary_namespace, + key, + lazy, + ); assert!(!self .do_list(primary_namespace, secondary_namespace) From c9e3f7150718dbd5e6bd13b702cfb85f30b47f94 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 17 Sep 2025 12:57:25 +0200 Subject: [PATCH 100/184] Require both types of `KVStore` As an intermediary step, we require any store to implement both `KVStore` and `KVStoreSync`, allowing us to switch over step-by-step. We already switch to the fully-async background processor variant here. --- Cargo.toml | 8 +- src/builder.rs | 7 +- src/data_store.rs | 49 ++++--- src/event.rs | 45 +++--- src/io/utils.rs | 128 ++++++++++-------- src/lib.rs | 41 +++--- .../asynchronous/static_invoice_store.rs | 59 ++++---- src/peer_store.rs | 59 ++++---- src/types.rs | 18 ++- tests/integration_tests_rust.rs | 5 +- 10 files changed, 236 insertions(+), 183 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b639b7dc1..7385b5c46 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ default = [] #lightning-types = { version = "0.2.0" } #lightning-invoice = { version = "0.33.0", features = ["std"] } #lightning-net-tokio = { version = "0.1.0" } -#lightning-persister = { version = "0.1.0" } +#lightning-persister = { version = "0.1.0", features = ["tokio"] } #lightning-background-processor = { version = "0.1.0" } #lightning-rapid-gossip-sync = { version = "0.1.0" } #lightning-block-sync = { version = "0.1.0", features = ["rest-client", "rpc-client", "tokio"] } @@ -44,7 +44,7 @@ default = [] #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } 
#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } #lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["tokio"] } #lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["rest-client", "rpc-client", "tokio"] } @@ -56,7 +56,7 @@ lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = " lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["std"] } lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["tokio"] } lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } lightning-block-sync = { git = 
"https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["rest-client", "rpc-client", "tokio"] } @@ -68,7 +68,7 @@ lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", #lightning-types = { path = "../rust-lightning/lightning-types" } #lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } #lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } -#lightning-persister = { path = "../rust-lightning/lightning-persister" } +#lightning-persister = { path = "../rust-lightning/lightning-persister", features = ["tokio"] } #lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } #lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } #lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } diff --git a/src/builder.rs b/src/builder.rs index 0b3ea3101..3396a52a0 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -31,7 +31,7 @@ use lightning::routing::scoring::{ }; use lightning::sign::{EntropySource, NodeSigner}; use lightning::util::persist::{ - read_channel_monitors, CHANNEL_MANAGER_PERSISTENCE_KEY, + read_channel_monitors, KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::ser::ReadableArgs; @@ -1419,7 +1419,8 @@ fn build_with_store_internal( // Initialize the ChannelManager let channel_manager = { - if let Ok(res) = kv_store.read( + if let Ok(res) = KVStoreSync::read( + &*kv_store, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -1657,7 +1658,7 @@ fn build_with_store_internal( Ok(output_sweeper) => Arc::new(output_sweeper), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { - 
Arc::new(OutputSweeper::new_with_kv_store_sync( + Arc::new(OutputSweeper::new( channel_manager.current_best_block(), Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), diff --git a/src/data_store.rs b/src/data_store.rs index f9dbaa788..83cbf4476 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -9,6 +9,7 @@ use std::collections::{hash_map, HashMap}; use std::ops::Deref; use std::sync::{Arc, Mutex}; +use lightning::util::persist::KVStoreSync; use lightning::util::ser::{Readable, Writeable}; use crate::logger::{log_error, LdkLogger}; @@ -97,19 +98,24 @@ where let removed = self.objects.lock().unwrap().remove(id).is_some(); if removed { let store_key = id.encode_to_hex_str(); - self.kv_store - .remove(&self.primary_namespace, &self.secondary_namespace, &store_key, false) - .map_err(|e| { - log_error!( - self.logger, - "Removing object data for key {}/{}/{} failed due to: {}", - &self.primary_namespace, - &self.secondary_namespace, - store_key, - e - ); - Error::PersistenceFailed - })?; + KVStoreSync::remove( + &*self.kv_store, + &self.primary_namespace, + &self.secondary_namespace, + &store_key, + false, + ) + .map_err(|e| { + log_error!( + self.logger, + "Removing object data for key {}/{}/{} failed due to: {}", + &self.primary_namespace, + &self.secondary_namespace, + store_key, + e + ); + Error::PersistenceFailed + })?; } Ok(()) } @@ -141,9 +147,14 @@ where fn persist(&self, object: &SO) -> Result<(), Error> { let store_key = object.id().encode_to_hex_str(); let data = object.encode(); - self.kv_store - .write(&self.primary_namespace, &self.secondary_namespace, &store_key, data) - .map_err(|e| { + KVStoreSync::write( + &*self.kv_store, + &self.primary_namespace, + &self.secondary_namespace, + &store_key, + data, + ) + .map_err(|e| { log_error!( self.logger, "Write for key {}/{}/{} failed due to: {}", @@ -241,13 +252,15 @@ mod tests { let store_key = id.encode_to_hex_str(); // Check we start empty. 
- assert!(store.read(&primary_namespace, &secondary_namespace, &store_key).is_err()); + assert!(KVStoreSync::read(&*store, &primary_namespace, &secondary_namespace, &store_key) + .is_err()); // Check we successfully store an object and return `false` let object = TestObject { id, data: [23u8; 3] }; assert_eq!(Ok(false), data_store.insert(object.clone())); assert_eq!(Some(object), data_store.get(&id)); - assert!(store.read(&primary_namespace, &secondary_namespace, &store_key).is_ok()); + assert!(KVStoreSync::read(&*store, &primary_namespace, &secondary_namespace, &store_key) + .is_ok()); // Test re-insertion returns `true` let mut override_object = object.clone(); diff --git a/src/event.rs b/src/event.rs index 1236c7cf2..824cba694 100644 --- a/src/event.rs +++ b/src/event.rs @@ -26,6 +26,7 @@ use lightning::util::config::{ ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate, }; use lightning::util::errors::APIError; +use lightning::util::persist::KVStoreSync; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use lightning_liquidity::lsps2::utils::compute_opening_fee; use lightning_types::payment::{PaymentHash, PaymentPreimage}; @@ -348,24 +349,24 @@ where fn persist_queue(&self, locked_queue: &VecDeque) -> Result<(), Error> { let data = EventQueueSerWrapper(locked_queue).encode(); - self.kv_store - .write( + KVStoreSync::write( + &*self.kv_store, + EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_KEY, + data, + ) + .map_err(|e| { + log_error!( + self.logger, + "Write for key {}/{}/{} failed due to: {}", EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY, - data, - ) - .map_err(|e| { - log_error!( - self.logger, - "Write for key {}/{}/{} failed due to: {}", - EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_KEY, - e - ); - 
Error::PersistenceFailed - })?; + e + ); + Error::PersistenceFailed + })?; Ok(()) } } @@ -1620,13 +1621,13 @@ mod tests { } // Check we can read back what we persisted. - let persisted_bytes = store - .read( - EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_KEY, - ) - .unwrap(); + let persisted_bytes = KVStoreSync::read( + &*store, + EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_KEY, + ) + .unwrap(); let deser_event_queue = EventQueue::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap(); assert_eq!(deser_event_queue.wait_next_event(), expected_event); diff --git a/src/io/utils.rs b/src/io/utils.rs index 0cc910ad7..cb3ca0847 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -24,11 +24,12 @@ use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringDecayParameters}; use lightning::util::persist::{ - KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, NETWORK_GRAPH_PERSISTENCE_KEY, - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_KEY, OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStoreSync, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, + NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; 
use lightning::util::sweep::OutputSweeper; @@ -131,7 +132,8 @@ pub(crate) fn read_network_graph( where L::Target: LdkLogger, { - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, @@ -150,7 +152,8 @@ where L::Target: LdkLogger, { let params = ProbabilisticScoringDecayParameters::default(); - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, @@ -169,7 +172,8 @@ pub(crate) fn read_event_queue( where L::Target: LdkLogger, { - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY, @@ -187,7 +191,8 @@ pub(crate) fn read_peer_info( where L::Target: LdkLogger, { - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, PEER_INFO_PERSISTENCE_KEY, @@ -207,11 +212,13 @@ where { let mut res = Vec::new(); - for stored_key in kv_store.list( + for stored_key in KVStoreSync::list( + &*kv_store, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, )? 
{ - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key, @@ -234,7 +241,8 @@ pub(crate) fn read_output_sweeper( chain_data_source: Arc, keys_manager: Arc, kv_store: Arc, logger: Arc, ) -> Result { - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, @@ -248,7 +256,7 @@ pub(crate) fn read_output_sweeper( kv_store, logger.clone(), ); - OutputSweeper::read_with_kv_store_sync(&mut reader, args).map_err(|e| { + OutputSweeper::read(&mut reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize OutputSweeper: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize OutputSweeper") }) @@ -260,7 +268,8 @@ pub(crate) fn read_node_metrics( where L::Target: LdkLogger, { - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, @@ -278,24 +287,24 @@ where L::Target: LdkLogger, { let data = node_metrics.encode(); - kv_store - .write( + KVStoreSync::write( + &*kv_store, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, + data, + ) + .map_err(|e| { + log_error!( + logger, + "Writing data to key {}/{}/{} failed due to: {}", NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, - data, - ) - .map_err(|e| { - log_error!( - logger, - "Writing data to key {}/{}/{} failed due to: {}", - NODE_METRICS_PRIMARY_NAMESPACE, - NODE_METRICS_SECONDARY_NAMESPACE, - NODE_METRICS_KEY, - e - ); - Error::PersistenceFailed - }) + e + ); + Error::PersistenceFailed + }) } pub(crate) fn is_valid_kvstore_str(key: &str) -> bool { 
@@ -397,24 +406,26 @@ macro_rules! impl_read_write_change_set_type { where L::Target: LdkLogger, { - let bytes = match kv_store.read($primary_namespace, $secondary_namespace, $key) { - Ok(bytes) => bytes, - Err(e) => { - if e.kind() == lightning::io::ErrorKind::NotFound { - return Ok(None); - } else { - log_error!( - logger, - "Reading data from key {}/{}/{} failed due to: {}", - $primary_namespace, - $secondary_namespace, - $key, - e - ); - return Err(e.into()); - } - }, - }; + let bytes = + match KVStoreSync::read(&*kv_store, $primary_namespace, $secondary_namespace, $key) + { + Ok(bytes) => bytes, + Err(e) => { + if e.kind() == lightning::io::ErrorKind::NotFound { + return Ok(None); + } else { + log_error!( + logger, + "Reading data from key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + return Err(e.into()); + } + }, + }; let mut reader = Cursor::new(bytes); let res: Result, DecodeError> = @@ -438,17 +449,18 @@ macro_rules! impl_read_write_change_set_type { L::Target: LdkLogger, { let data = ChangeSetSerWrapper(value).encode(); - kv_store.write($primary_namespace, $secondary_namespace, $key, data).map_err(|e| { - log_error!( - logger, - "Writing data to key {}/{}/{} failed due to: {}", - $primary_namespace, - $secondary_namespace, - $key, - e - ); - e.into() - }) + KVStoreSync::write(&*kv_store, $primary_namespace, $secondary_namespace, $key, data) + .map_err(|e| { + log_error!( + logger, + "Writing data to key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + e.into() + }) } }; } diff --git a/src/lib.rs b/src/lib.rs index a075cfac5..c235d2a88 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -134,7 +134,8 @@ use lightning::ln::channel_state::ChannelShutdownState; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; -use lightning_background_processor::process_events_async_with_kv_store_sync; +use 
lightning::util::persist::KVStoreSync; +use lightning_background_processor::process_events_async; use liquidity::{LSPS1Liquidity, LiquiditySource}; use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use payment::asynchronous::om_mailbox::OnionMessageMailbox; @@ -147,10 +148,12 @@ use peer_store::{PeerInfo, PeerStore}; use rand::Rng; use runtime::Runtime; use types::{ - Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, - KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, + Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, Graph, KeysManager, + OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, +}; +pub use types::{ + ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, }; -pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, UserChannelId}; pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, vss_client, @@ -562,7 +565,7 @@ impl Node { }; self.runtime.spawn_background_processor_task(async move { - process_events_async_with_kv_store_sync( + process_events_async( background_persister, |e| background_event_handler.handle_event(e), background_chain_mon, @@ -1478,20 +1481,20 @@ impl Node { /// Exports the current state of the scorer. The result can be shared with and merged by light nodes that only have /// a limited view of the network. 
pub fn export_pathfinding_scores(&self) -> Result, Error> { - self.kv_store - .read( - lightning::util::persist::SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - lightning::util::persist::SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - lightning::util::persist::SCORER_PERSISTENCE_KEY, - ) - .map_err(|e| { - log_error!( - self.logger, - "Failed to access store while exporting pathfinding scores: {}", - e - ); - Error::PersistenceFailed - }) + KVStoreSync::read( + &*self.kv_store, + lightning::util::persist::SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + lightning::util::persist::SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + lightning::util::persist::SCORER_PERSISTENCE_KEY, + ) + .map_err(|e| { + log_error!( + self.logger, + "Failed to access store while exporting pathfinding scores: {}", + e + ); + Error::PersistenceFailed + }) } } diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index e81fd8216..a7e2d2f9e 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -15,6 +15,7 @@ use bitcoin::hashes::Hash; use lightning::blinded_path::message::BlindedMessagePath; use lightning::impl_writeable_tlv_based; use lightning::offers::static_invoice::StaticInvoice; +use lightning::util::persist::KVStoreSync; use lightning::util::ser::{Readable, Writeable}; use crate::hex_utils; @@ -77,29 +78,33 @@ impl StaticInvoiceStore { let (secondary_namespace, key) = Self::get_storage_location(invoice_slot, recipient_id); - self.kv_store - .read(STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, &secondary_namespace, &key) - .and_then(|data| { - PersistedStaticInvoice::read(&mut &*data) - .map(|persisted_invoice| { - Some((persisted_invoice.invoice, persisted_invoice.request_path)) - }) - .map_err(|e| { - lightning::io::Error::new( - lightning::io::ErrorKind::InvalidData, - format!("Failed to parse static invoice: {:?}", e), - ) - }) - }) - .or_else( - |e| { - if e.kind() == 
lightning::io::ErrorKind::NotFound { - Ok(None) - } else { - Err(e) - } - }, - ) + KVStoreSync::read( + &*self.kv_store, + STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, + &secondary_namespace, + &key, + ) + .and_then(|data| { + PersistedStaticInvoice::read(&mut &*data) + .map(|persisted_invoice| { + Some((persisted_invoice.invoice, persisted_invoice.request_path)) + }) + .map_err(|e| { + lightning::io::Error::new( + lightning::io::ErrorKind::InvalidData, + format!("Failed to parse static invoice: {:?}", e), + ) + }) + }) + .or_else( + |e| { + if e.kind() == lightning::io::ErrorKind::NotFound { + Ok(None) + } else { + Err(e) + } + }, + ) } pub(crate) async fn handle_persist_static_invoice( @@ -119,7 +124,13 @@ impl StaticInvoiceStore { // Static invoices will be persisted at "static_invoices//". // // Example: static_invoices/039058c6f2c0cb492c533b0a4d14ef77cc0f78abccced5287d84a1a2011cfb81/00001 - self.kv_store.write(STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, &secondary_namespace, &key, buf) + KVStoreSync::write( + &*self.kv_store, + STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, + &secondary_namespace, + &key, + buf, + ) } fn get_storage_location(invoice_slot: u16, recipient_id: &[u8]) -> (String, String) { diff --git a/src/peer_store.rs b/src/peer_store.rs index 5ebdc0419..82c80c396 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -11,6 +11,7 @@ use std::sync::{Arc, RwLock}; use bitcoin::secp256k1::PublicKey; use lightning::impl_writeable_tlv_based; +use lightning::util::persist::KVStoreSync; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use crate::io::{ @@ -67,24 +68,24 @@ where fn persist_peers(&self, locked_peers: &HashMap) -> Result<(), Error> { let data = PeerStoreSerWrapper(&*locked_peers).encode(); - self.kv_store - .write( + KVStoreSync::write( + &*self.kv_store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + data, + ) + .map_err(|e| { + log_error!( + self.logger, + 
"Write for key {}/{}/{} failed due to: {}", PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, PEER_INFO_PERSISTENCE_KEY, - data, - ) - .map_err(|e| { - log_error!( - self.logger, - "Write for key {}/{}/{} failed due to: {}", - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - e - ); - Error::PersistenceFailed - })?; + e + ); + Error::PersistenceFailed + })?; Ok(()) } } @@ -167,23 +168,23 @@ mod tests { .unwrap(); let address = SocketAddress::from_str("127.0.0.1:9738").unwrap(); let expected_peer_info = PeerInfo { node_id, address }; - assert!(store - .read( - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - ) - .is_err()); + assert!(KVStoreSync::read( + &*store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + ) + .is_err()); peer_store.add_peer(expected_peer_info.clone()).unwrap(); // Check we can read back what we persisted. 
- let persisted_bytes = store - .read( - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - ) - .unwrap(); + let persisted_bytes = KVStoreSync::read( + &*store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + ) + .unwrap(); let deser_peer_store = PeerStore::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap(); diff --git a/src/types.rs b/src/types.rs index f152772a1..4f5229dd2 100644 --- a/src/types.rs +++ b/src/types.rs @@ -19,7 +19,7 @@ use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::{KVStoreSync, KVStoreSyncWrapper}; +use lightning::util::persist::{KVStore, KVStoreSync}; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; use lightning_block_sync::gossip::{GossipVerifier, UtxoSource}; @@ -35,7 +35,19 @@ use crate::logger::Logger; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::PaymentDetails; -pub(crate) type DynStore = dyn KVStoreSync + Sync + Send; +/// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the +/// same time. 
+pub trait SyncAndAsyncKVStore: KVStore + KVStoreSync {} + +impl SyncAndAsyncKVStore for T +where + T: KVStore, + T: KVStoreSync, +{ +} + +/// A type alias for [`SyncAndAsyncKVStore`] with `Sync`/`Send` markers; +pub type DynStore = dyn SyncAndAsyncKVStore + Sync + Send; pub(crate) type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, @@ -133,7 +145,7 @@ pub(crate) type Sweeper = OutputSweeper< Arc, Arc, Arc, - KVStoreSyncWrapper>, + Arc, Arc, Arc, >; diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index cca52ae2d..64a78e11b 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -31,11 +31,10 @@ use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, QrPaymentResult, }; -use ldk_node::{Builder, Event, NodeError}; +use ldk_node::{Builder, DynStore, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::routing::router::RouteParametersConfig; -use lightning::util::persist::KVStoreSync; use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; use log::LevelFilter; @@ -243,7 +242,7 @@ fn start_stop_reinit() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let test_sync_store: Arc = + let test_sync_store: Arc = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.clone().into())); let sync_config = EsploraSyncConfig { background_sync_config: None }; From e92cadac471c7f28eb6fe0ddc170c39f497b29e0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 8 Oct 2025 10:09:52 +0200 Subject: [PATCH 101/184] Use `#[allow(deprecated)]` to silence new deprecation warnings The `bdk_wallet` release 2.2 deprecated BDK's signer API in favor of using `bitcoin::psbt::sign`. 
As the best approach for switching to that API is currently not entirely clear, we temporarily allow for the use of the deprecated APIs to silence the warnings and unbreak our CI. --- src/error.rs | 2 ++ src/wallet/mod.rs | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/error.rs b/src/error.rs index ae47c5ba8..7e9dbac20 100644 --- a/src/error.rs +++ b/src/error.rs @@ -11,6 +11,7 @@ use bdk_chain::bitcoin::psbt::ExtractTxError as BdkExtractTxError; use bdk_chain::local_chain::CannotConnectError as BdkChainConnectionError; use bdk_chain::tx_graph::CalculateFeeError as BdkChainCalculateFeeError; use bdk_wallet::error::CreateTxError as BdkCreateTxError; +#[allow(deprecated)] use bdk_wallet::signer::SignerError as BdkSignerError; #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -207,6 +208,7 @@ impl fmt::Display for Error { impl std::error::Error for Error {} +#[allow(deprecated)] impl From for Error { fn from(_: BdkSignerError) -> Self { Self::OnchainTxSigningFailed diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 0ce4628d4..6d79fe02f 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -11,7 +11,9 @@ use std::str::FromStr; use std::sync::{Arc, Mutex}; use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; -use bdk_wallet::{Balance, KeychainKind, PersistedWallet, SignOptions, Update}; +#[allow(deprecated)] +use bdk_wallet::SignOptions; +use bdk_wallet::{Balance, KeychainKind, PersistedWallet, Update}; use bitcoin::address::NetworkUnchecked; use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; use bitcoin::blockdata::locktime::absolute::LockTime; @@ -222,6 +224,7 @@ impl Wallet { Ok(()) } + #[allow(deprecated)] pub(crate) fn create_funding_transaction( &self, output_script: ScriptBuf, amount: Amount, confirmation_target: ConfirmationTarget, locktime: LockTime, @@ -338,6 +341,7 @@ impl Wallet { Ok(()) } + #[allow(deprecated)] pub(crate) fn send_to_address( &self, address: &bitcoin::Address, 
send_amount: OnchainSendAmount, fee_rate: Option, @@ -647,6 +651,7 @@ impl Wallet { Ok(utxos) } + #[allow(deprecated)] fn get_change_script_inner(&self) -> Result { let mut locked_wallet = self.inner.lock().unwrap(); let mut locked_persister = self.persister.lock().unwrap(); @@ -659,6 +664,7 @@ impl Wallet { Ok(address_info.address.script_pubkey()) } + #[allow(deprecated)] fn sign_psbt_inner(&self, mut psbt: Psbt) -> Result { let locked_wallet = self.inner.lock().unwrap(); From a404a94df7e0b99c45bb2c21f4aaf4799363f6e4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 25 Sep 2025 09:10:47 +0200 Subject: [PATCH 102/184] Account for `LiquidityManager` persistence We recently implemented persistence for the `lightning-liquidity` service state. Here we make corresponding changes in LDK Node to have our service state persisted. --- Cargo.toml | 24 +++++----- src/builder.rs | 4 +- src/event.rs | 117 +++++++++++++++++++++++++---------------------- src/liquidity.rs | 91 ++++++++++++++++++++---------------- src/types.rs | 1 + 5 files changed, 130 insertions(+), 107 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9829b7cb7..6e404bd92 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,17 +52,17 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } 
-lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } 
+lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } #lightning = { path = "../rust-lightning/lightning", features = ["std"] } #lightning-types = { path = "../rust-lightning/lightning-types" } @@ -109,7 +109,7 @@ winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] #lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "3e21ba37a133977d4247e86f25df983b39326994", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/src/builder.rs b/src/builder.rs index 3396a52a0..0f627a2fe 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1557,6 +1557,7 @@ fn build_with_store_internal( Arc::clone(&channel_manager), Arc::clone(&keys_manager), Arc::clone(&chain_source), + Arc::clone(&kv_store), Arc::clone(&config), 
Arc::clone(&logger), ); @@ -1590,7 +1591,8 @@ fn build_with_store_internal( liquidity_source_builder.lsps2_service(promise_secret, config.clone()) }); - let liquidity_source = Arc::new(liquidity_source_builder.build()); + let liquidity_source = runtime + .block_on(async move { liquidity_source_builder.build().await.map(Arc::new) })?; let custom_message_handler = Arc::new(NodeCustomMessageHandler::new_liquidity(Arc::clone(&liquidity_source))); (Some(liquidity_source), custom_message_handler) diff --git a/src/event.rs b/src/event.rs index 824cba694..df6649e05 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1046,7 +1046,7 @@ where LdkEvent::ProbeFailed { .. } => {}, LdkEvent::HTLCHandlingFailed { failure_type, .. } => { if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_htlc_handling_failed(failure_type); + liquidity_source.handle_htlc_handling_failed(failure_type).await; } }, LdkEvent::SpendableOutputs { outputs, channel_id } => { @@ -1229,40 +1229,46 @@ where claim_from_onchain_tx, outbound_amount_forwarded_msat, } => { - let read_only_network_graph = self.network_graph.read_only(); - let nodes = read_only_network_graph.nodes(); - let channels = self.channel_manager.list_channels(); - - let node_str = |channel_id: &Option| { - channel_id - .and_then(|channel_id| channels.iter().find(|c| c.channel_id == channel_id)) - .and_then(|channel| { - nodes.get(&NodeId::from_pubkey(&channel.counterparty.node_id)) - }) - .map_or("private_node".to_string(), |node| { - node.announcement_info - .as_ref() - .map_or("unnamed node".to_string(), |ann| { - format!("node {}", ann.alias()) - }) - }) - }; - let channel_str = |channel_id: &Option| { - channel_id - .map(|channel_id| format!(" with channel {}", channel_id)) - .unwrap_or_default() - }; - let from_prev_str = format!( - " from {}{}", - node_str(&prev_channel_id), - channel_str(&prev_channel_id) - ); - let to_next_str = - format!(" to {}{}", node_str(&next_channel_id), 
channel_str(&next_channel_id)); + { + let read_only_network_graph = self.network_graph.read_only(); + let nodes = read_only_network_graph.nodes(); + let channels = self.channel_manager.list_channels(); + + let node_str = |channel_id: &Option| { + channel_id + .and_then(|channel_id| { + channels.iter().find(|c| c.channel_id == channel_id) + }) + .and_then(|channel| { + nodes.get(&NodeId::from_pubkey(&channel.counterparty.node_id)) + }) + .map_or("private_node".to_string(), |node| { + node.announcement_info + .as_ref() + .map_or("unnamed node".to_string(), |ann| { + format!("node {}", ann.alias()) + }) + }) + }; + let channel_str = |channel_id: &Option| { + channel_id + .map(|channel_id| format!(" with channel {}", channel_id)) + .unwrap_or_default() + }; + let from_prev_str = format!( + " from {}{}", + node_str(&prev_channel_id), + channel_str(&prev_channel_id) + ); + let to_next_str = format!( + " to {}{}", + node_str(&next_channel_id), + channel_str(&next_channel_id) + ); - let fee_earned = total_fee_earned_msat.unwrap_or(0); - if claim_from_onchain_tx { - log_info!( + let fee_earned = total_fee_earned_msat.unwrap_or(0); + if claim_from_onchain_tx { + log_info!( self.logger, "Forwarded payment{}{} of {}msat, earning {}msat in fees from claiming onchain.", from_prev_str, @@ -1270,19 +1276,20 @@ where outbound_amount_forwarded_msat.unwrap_or(0), fee_earned, ); - } else { - log_info!( - self.logger, - "Forwarded payment{}{} of {}msat, earning {}msat in fees.", - from_prev_str, - to_next_str, - outbound_amount_forwarded_msat.unwrap_or(0), - fee_earned, - ); + } else { + log_info!( + self.logger, + "Forwarded payment{}{} of {}msat, earning {}msat in fees.", + from_prev_str, + to_next_str, + outbound_amount_forwarded_msat.unwrap_or(0), + fee_earned, + ); + } } if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_payment_forwarded(next_channel_id); + liquidity_source.handle_payment_forwarded(next_channel_id).await; } let event = 
Event::PaymentForwarded { @@ -1375,11 +1382,9 @@ where ); if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_channel_ready( - user_channel_id, - &channel_id, - &counterparty_node_id, - ); + liquidity_source + .handle_channel_ready(user_channel_id, &channel_id, &counterparty_node_id) + .await; } let event = Event::ChannelReady { @@ -1428,12 +1433,14 @@ where .. } => { if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_htlc_intercepted( - requested_next_hop_scid, - intercept_id, - expected_outbound_amount_msat, - payment_hash, - ); + liquidity_source + .handle_htlc_intercepted( + requested_next_hop_scid, + intercept_id, + expected_outbound_amount_msat, + payment_hash, + ) + .await; } }, LdkEvent::InvoiceReceived { .. } => { diff --git a/src/liquidity.rs b/src/liquidity.rs index ae31f9ace..a09848b38 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -38,11 +38,12 @@ use lightning_types::payment::PaymentHash; use rand::Rng; use tokio::sync::oneshot; +use crate::builder::BuildError; use crate::chain::ChainSource; use crate::connection::ConnectionManager; use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; use crate::runtime::Runtime; -use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager, Wallet}; +use crate::types::{ChannelManager, DynStore, KeysManager, LiquidityManager, PeerManager, Wallet}; use crate::{total_anchor_channels_reserve_sats, Config, Error}; const LIQUIDITY_REQUEST_TIMEOUT_SECS: u64 = 5; @@ -140,6 +141,7 @@ where channel_manager: Arc, keys_manager: Arc, chain_source: Arc, + kv_store: Arc, config: Arc, logger: L, } @@ -150,7 +152,7 @@ where { pub(crate) fn new( wallet: Arc, channel_manager: Arc, keys_manager: Arc, - chain_source: Arc, config: Arc, logger: L, + chain_source: Arc, kv_store: Arc, config: Arc, logger: L, ) -> Self { let lsps1_client = None; let lsps2_client = None; @@ -163,6 +165,7 @@ where channel_manager, 
keys_manager, chain_source, + kv_store, config, logger, } @@ -213,7 +216,7 @@ where self } - pub(crate) fn build(self) -> LiquiditySource { + pub(crate) async fn build(self) -> Result, BuildError> { let liquidity_service_config = self.lsps2_service.as_ref().map(|s| { let lsps2_service_config = Some(s.ldk_service_config.clone()); let lsps5_service_config = None; @@ -230,17 +233,22 @@ where lsps5_client_config, }); - let liquidity_manager = Arc::new(LiquidityManager::new( - Arc::clone(&self.keys_manager), - Arc::clone(&self.keys_manager), - Arc::clone(&self.channel_manager), - Some(Arc::clone(&self.chain_source)), - None, - liquidity_service_config, - liquidity_client_config, - )); + let liquidity_manager = Arc::new( + LiquidityManager::new( + Arc::clone(&self.keys_manager), + Arc::clone(&self.keys_manager), + Arc::clone(&self.channel_manager), + Some(Arc::clone(&self.chain_source)), + None, + Arc::clone(&self.kv_store), + liquidity_service_config, + liquidity_client_config, + ) + .await + .map_err(|_| BuildError::ReadFailed)?, + ); - LiquiditySource { + Ok(LiquiditySource { lsps1_client: self.lsps1_client, lsps2_client: self.lsps2_client, lsps2_service: self.lsps2_service, @@ -251,7 +259,7 @@ where liquidity_manager, config: self.config, logger: self.logger, - } + }) } } @@ -574,14 +582,17 @@ where } } - match lsps2_service_handler.invoice_parameters_generated( - &counterparty_node_id, - request_id, - intercept_scid, - LSPS2_CHANNEL_CLTV_EXPIRY_DELTA, - LSPS2_CLIENT_TRUSTS_LSP_MODE, - user_channel_id, - ) { + match lsps2_service_handler + .invoice_parameters_generated( + &counterparty_node_id, + request_id, + intercept_scid, + LSPS2_CHANNEL_CLTV_EXPIRY_DELTA, + LSPS2_CLIENT_TRUSTS_LSP_MODE, + user_channel_id, + ) + .await + { Ok(()) => {}, Err(e) => { log_error!( @@ -1239,15 +1250,14 @@ where }) } - pub(crate) fn handle_channel_ready( + pub(crate) async fn handle_channel_ready( &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) 
{ if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.channel_ready( - user_channel_id, - channel_id, - counterparty_node_id, - ) { + if let Err(e) = lsps2_service_handler + .channel_ready(user_channel_id, channel_id, counterparty_node_id) + .await + { log_error!( self.logger, "LSPS2 service failed to handle ChannelReady event: {:?}", @@ -1257,17 +1267,20 @@ where } } - pub(crate) fn handle_htlc_intercepted( + pub(crate) async fn handle_htlc_intercepted( &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.htlc_intercepted( - intercept_scid, - intercept_id, - expected_outbound_amount_msat, - payment_hash, - ) { + if let Err(e) = lsps2_service_handler + .htlc_intercepted( + intercept_scid, + intercept_id, + expected_outbound_amount_msat, + payment_hash, + ) + .await + { log_error!( self.logger, "LSPS2 service failed to handle HTLCIntercepted event: {:?}", @@ -1277,9 +1290,9 @@ where } } - pub(crate) fn handle_htlc_handling_failed(&self, failure_type: HTLCHandlingFailureType) { + pub(crate) async fn handle_htlc_handling_failed(&self, failure_type: HTLCHandlingFailureType) { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.htlc_handling_failed(failure_type) { + if let Err(e) = lsps2_service_handler.htlc_handling_failed(failure_type).await { log_error!( self.logger, "LSPS2 service failed to handle HTLCHandlingFailed event: {:?}", @@ -1289,10 +1302,10 @@ where } } - pub(crate) fn handle_payment_forwarded(&self, next_channel_id: Option) { + pub(crate) async fn handle_payment_forwarded(&self, next_channel_id: Option) { if let Some(next_channel_id) = next_channel_id { if let Some(lsps2_service_handler) = 
self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.payment_forwarded(next_channel_id) { + if let Err(e) = lsps2_service_handler.payment_forwarded(next_channel_id).await { log_error!( self.logger, "LSPS2 service failed to handle PaymentForwarded: {:?}", diff --git a/src/types.rs b/src/types.rs index 4f5229dd2..ccfde2766 100644 --- a/src/types.rs +++ b/src/types.rs @@ -75,6 +75,7 @@ pub(crate) type LiquidityManager = lightning_liquidity::LiquidityManager< Arc, Arc, Arc, + Arc, Arc, >; From 351a0a91dfe0dc8816527cf5b3d9aab994ac243a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 25 Sep 2025 16:19:55 +0200 Subject: [PATCH 103/184] Account for dropped `Arc` for `DefaultTimeProvider` --- src/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/types.rs b/src/types.rs index ccfde2766..252efd042 100644 --- a/src/types.rs +++ b/src/types.rs @@ -76,7 +76,7 @@ pub(crate) type LiquidityManager = lightning_liquidity::LiquidityManager< Arc, Arc, Arc, - Arc, + DefaultTimeProvider, >; pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager< From 9977903132e56afa78804b2e3de1bee29a9b410a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 25 Sep 2025 16:20:22 +0200 Subject: [PATCH 104/184] Account for `lazy` being dropped from `KVStore::remove` --- Cargo.toml | 50 +++++++++++++++++++------------------- src/data_store.rs | 1 - src/io/sqlite_store/mod.rs | 8 +++--- src/io/test_utils.rs | 4 +-- src/io/vss_store.rs | 8 +++--- tests/common/mod.rs | 30 ++++++++--------------- 6 files changed, 43 insertions(+), 58 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6e404bd92..f684c68b3 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,29 +52,29 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -lightning = { git = 
"https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } - -#lightning = { path = "../rust-lightning/lightning", features = ["std"] } -#lightning-types = { path = "../rust-lightning/lightning-types" } -#lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } -#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } -#lightning-persister = { path = 
"../rust-lightning/lightning-persister", features = ["tokio"] } -#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } -#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } -#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } -#lightning-macros = { path = "../rust-lightning/lightning-macros" } +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } +#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } +#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["tokio"] } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["rest-client", "rpc-client", "tokio"] } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", 
rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } +#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } + +lightning = { path = "../rust-lightning/lightning", features = ["std"] } +lightning-types = { path = "../rust-lightning/lightning-types" } +lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } +lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } +lightning-persister = { path = "../rust-lightning/lightning-persister", features = ["tokio"] } +lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } +lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } +lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } +lightning-macros = { path = "../rust-lightning/lightning-macros" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -109,8 +109,8 @@ winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] #lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std", "_test_utils"] } -#lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std", "_test_utils"] } +lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/src/data_store.rs b/src/data_store.rs index 83cbf4476..ce4b294e0 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -103,7 +103,6 @@ where &self.primary_namespace, &self.secondary_namespace, &store_key, - false, ) .map_err(|e| { log_error!( diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 6ba41f714..c41df8ea0 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -137,7 +137,7 @@ impl KVStore for SqliteStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Pin> + Send>> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -153,7 +153,6 @@ impl KVStore for SqliteStore { &primary_namespace, &secondary_namespace, &key, - lazy, ) }); Box::pin(async move { @@ -206,7 +205,7 @@ impl KVStoreSync for SqliteStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result<()> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -217,7 +216,6 @@ impl KVStoreSync for SqliteStore { primary_namespace, secondary_namespace, key, - lazy, ) } @@ -402,7 +400,7 @@ impl 
SqliteStoreInner { fn remove_internal( &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 067664851..22f1a4ea5 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -55,7 +55,7 @@ pub(crate) fn do_read_write_remove_list_persist( let read_data = kv_store.read(primary_namespace, secondary_namespace, key).unwrap(); assert_eq!(data, &*read_data); - kv_store.remove(primary_namespace, secondary_namespace, key, false).unwrap(); + kv_store.remove(primary_namespace, secondary_namespace, key).unwrap(); let listed_keys = kv_store.list(primary_namespace, secondary_namespace).unwrap(); assert_eq!(listed_keys.len(), 0); @@ -71,7 +71,7 @@ pub(crate) fn do_read_write_remove_list_persist( let read_data = kv_store.read(&max_chars, &max_chars, &max_chars).unwrap(); assert_eq!(data, &*read_data); - kv_store.remove(&max_chars, &max_chars, &max_chars, false).unwrap(); + kv_store.remove(&max_chars, &max_chars, &max_chars).unwrap(); let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap(); assert_eq!(listed_keys.len(), 0); diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index c5442d1ac..134ff7af2 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -116,7 +116,7 @@ impl KVStoreSync for VssStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result<()> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -127,7 +127,6 @@ impl KVStoreSync for VssStore { primary_namespace, 
secondary_namespace, key, - lazy, ); self.runtime.block_on(fut) } @@ -174,7 +173,7 @@ impl KVStore for VssStore { }) } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Pin> + Send>> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -191,7 +190,6 @@ impl KVStore for VssStore { &primary_namespace, &secondary_namespace, &key, - lazy, ) .await }) @@ -369,7 +367,7 @@ impl VssStoreInner { async fn remove_internal( &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 817d0edc5..3ac0e8432 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1240,14 +1240,14 @@ impl KVStore for TestSyncStore { }) } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Pin> + Send>> { let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); let fut = tokio::task::spawn_blocking(move || { - inner.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy) + inner.remove_internal(&primary_namespace, &secondary_namespace, &key) }); Box::pin(async move { fut.await.unwrap_or_else(|e| { @@ -1288,9 +1288,9 @@ impl KVStoreSync for TestSyncStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + &self, primary_namespace: &str, 
secondary_namespace: &str, key: &str, ) -> lightning::io::Result<()> { - self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy) + self.inner.remove_internal(primary_namespace, secondary_namespace, key) } fn list( @@ -1428,25 +1428,15 @@ impl TestSyncStoreInner { } fn remove_internal( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); let fs_res = - KVStoreSync::remove(&self.fs_store, primary_namespace, secondary_namespace, key, lazy); - let sqlite_res = KVStoreSync::remove( - &self.sqlite_store, - primary_namespace, - secondary_namespace, - key, - lazy, - ); - let test_res = KVStoreSync::remove( - &self.test_store, - primary_namespace, - secondary_namespace, - key, - lazy, - ); + KVStoreSync::remove(&self.fs_store, primary_namespace, secondary_namespace, key); + let sqlite_res = + KVStoreSync::remove(&self.sqlite_store, primary_namespace, secondary_namespace, key); + let test_res = + KVStoreSync::remove(&self.test_store, primary_namespace, secondary_namespace, key); assert!(!self .do_list(primary_namespace, secondary_namespace) From 7855d38c2f31f79a43bad13f97cc6ee9872bef56 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 30 Sep 2025 10:01:04 +0200 Subject: [PATCH 105/184] Account for lifetime change in `get_change_destination_script` --- src/wallet/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 6d79fe02f..c72c5b9f3 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -916,7 +916,7 @@ impl SignerProvider for WalletKeysManager { impl ChangeDestinationSource for WalletKeysManager { fn get_change_destination_script<'a>( - &self, + &'a self, ) -> Pin> + Send + 'a>> { let wallet = Arc::clone(&self.wallet); let logger = Arc::clone(&self.logger); From 
cabcbb3713f99c5ff1727a113311892de70e7d14 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 30 Sep 2025 10:07:56 +0200 Subject: [PATCH 106/184] Account for `LiquidityManager` taking a broadcaster --- src/builder.rs | 1 + src/event.rs | 11 ++++++++++- src/liquidity.rs | 18 ++++++++++++++---- src/types.rs | 1 + 4 files changed, 26 insertions(+), 5 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 0f627a2fe..b64fc23a0 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1557,6 +1557,7 @@ fn build_with_store_internal( Arc::clone(&channel_manager), Arc::clone(&keys_manager), Arc::clone(&chain_source), + Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), diff --git a/src/event.rs b/src/event.rs index df6649e05..e14479972 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1289,7 +1289,16 @@ where } if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_payment_forwarded(next_channel_id).await; + if let Some(skimmed_fee_msat) = skimmed_fee_msat { + liquidity_source + .handle_payment_forwarded(next_channel_id, skimmed_fee_msat) + .await; + } else { + debug_assert!( + false, + "We expect skimmed_fee_msat to be set since LDK 0.0.122" + ); + } } let event = Event::PaymentForwarded { diff --git a/src/liquidity.rs b/src/liquidity.rs index a09848b38..81d48e530 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -43,7 +43,9 @@ use crate::chain::ChainSource; use crate::connection::ConnectionManager; use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; use crate::runtime::Runtime; -use crate::types::{ChannelManager, DynStore, KeysManager, LiquidityManager, PeerManager, Wallet}; +use crate::types::{ + Broadcaster, ChannelManager, DynStore, KeysManager, LiquidityManager, PeerManager, Wallet, +}; use crate::{total_anchor_channels_reserve_sats, Config, Error}; const LIQUIDITY_REQUEST_TIMEOUT_SECS: u64 = 5; @@ -141,6 +143,7 @@ where channel_manager: Arc, keys_manager: 
Arc, chain_source: Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: L, @@ -152,7 +155,8 @@ where { pub(crate) fn new( wallet: Arc, channel_manager: Arc, keys_manager: Arc, - chain_source: Arc, kv_store: Arc, config: Arc, logger: L, + chain_source: Arc, tx_broadcaster: Arc, kv_store: Arc, + config: Arc, logger: L, ) -> Self { let lsps1_client = None; let lsps2_client = None; @@ -165,6 +169,7 @@ where channel_manager, keys_manager, chain_source, + tx_broadcaster, kv_store, config, logger, @@ -241,6 +246,7 @@ where Some(Arc::clone(&self.chain_source)), None, Arc::clone(&self.kv_store), + Arc::clone(&self.tx_broadcaster), liquidity_service_config, liquidity_client_config, ) @@ -1302,10 +1308,14 @@ where } } - pub(crate) async fn handle_payment_forwarded(&self, next_channel_id: Option) { + pub(crate) async fn handle_payment_forwarded( + &self, next_channel_id: Option, skimmed_fee_msat: u64, + ) { if let Some(next_channel_id) = next_channel_id { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.payment_forwarded(next_channel_id).await { + if let Err(e) = + lsps2_service_handler.payment_forwarded(next_channel_id, skimmed_fee_msat).await + { log_error!( self.logger, "LSPS2 service failed to handle PaymentForwarded: {:?}", diff --git a/src/types.rs b/src/types.rs index 252efd042..ddd587985 100644 --- a/src/types.rs +++ b/src/types.rs @@ -77,6 +77,7 @@ pub(crate) type LiquidityManager = lightning_liquidity::LiquidityManager< Arc, Arc, DefaultTimeProvider, + Arc, >; pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager< From 9ec89fe73e2817191446c325b83f376fd51eb365 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 2 Oct 2025 14:24:48 +0200 Subject: [PATCH 107/184] Account for `OutputSweeper::read` being dropped --- src/io/utils.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io/utils.rs b/src/io/utils.rs index 
cb3ca0847..1556314c4 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -32,7 +32,6 @@ use lightning::util::persist::{ SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; -use lightning::util::sweep::OutputSweeper; use lightning_types::string::PrintableString; use rand::{thread_rng, RngCore}; @@ -256,10 +255,11 @@ pub(crate) fn read_output_sweeper( kv_store, logger.clone(), ); - OutputSweeper::read(&mut reader, args).map_err(|e| { + let (_, sweeper) = <(_, Sweeper)>::read(&mut reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize OutputSweeper: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize OutputSweeper") - }) + })?; + Ok(sweeper) } pub(crate) fn read_node_metrics( From 22761575ac279cdec0a7dc59dace5458b8d587b8 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 13 Oct 2025 10:11:08 +0200 Subject: [PATCH 108/184] Bump to latest LDK `main` - We fix an oversight introduced in 9977903132e56afa78804b2e3de1bee29a9b410a where we switched `Cargo.toml` to use local dependencies instead of pointing to an appropriate upstream commit - We account for Splicing-related events - We account for `KeysManager` taking a new flag for v2-payment-key derivation. 
--- Cargo.toml | 50 +++++++++++++++++++++++------------------------ src/event.rs | 12 ++++++++++++ src/wallet/mod.rs | 2 +- 3 files changed, 38 insertions(+), 26 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f684c68b3..e2afb0be7 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,29 +52,29 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } -#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std"] } -#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["tokio"] } -#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { git = 
"https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } -#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204" } - -lightning = { path = "../rust-lightning/lightning", features = ["std"] } -lightning-types = { path = "../rust-lightning/lightning-types" } -lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } -lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } -lightning-persister = { path = "../rust-lightning/lightning-persister", features = ["tokio"] } -lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } -lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } -lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } -lightning-macros = { path = "../rust-lightning/lightning-macros" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["tokio"] } 
+lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } + +#lightning = { path = "../rust-lightning/lightning", features = ["std"] } +#lightning-types = { path = "../rust-lightning/lightning-types" } +#lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } +#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } +#lightning-persister = { path = "../rust-lightning/lightning-persister", features = ["tokio"] } +#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } +#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } +#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } +#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } +#lightning-macros = { path = "../rust-lightning/lightning-macros" } 
bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -109,8 +109,8 @@ winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] #lightning = { version = "0.1.0", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "d0765847c85f1c3dc753c17c3e05dbcb1d300204", features = ["std", "_test_utils"] } -lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std", "_test_utils"] } +#lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" diff --git a/src/event.rs b/src/event.rs index e14479972..79fc5e5db 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1601,6 +1601,18 @@ where LdkEvent::FundingTransactionReadyForSigning { .. } => { debug_assert!(false, "We currently don't support interactive-tx, so this event should never be emitted."); }, + LdkEvent::SplicePending { .. } => { + debug_assert!( + false, + "We currently don't support splicing, so this event should never be emitted." + ); + }, + LdkEvent::SpliceFailed { .. } => { + debug_assert!( + false, + "We currently don't support splicing, so this event should never be emitted." 
+ ); + }, } Ok(()) } diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index c72c5b9f3..8f8151b9c 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -792,7 +792,7 @@ impl WalletKeysManager { seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, wallet: Arc, logger: Arc, ) -> Self { - let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos); + let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos, true); Self { inner, wallet, logger } } From 813b162778c28fc004f0dd11f4965e429cc43307 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 14 Oct 2025 09:37:48 +0200 Subject: [PATCH 109/184] Bump LDK to v0.2.0-beta1 We update our LDK dependency to the just-released v0.2.0-beta1. --- Cargo.toml | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e2afb0be7..7b888c929 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,17 +28,17 @@ panic = 'abort' # Abort on panic default = [] [dependencies] -#lightning = { version = "0.1.0", features = ["std"] } -#lightning-types = { version = "0.2.0" } -#lightning-invoice = { version = "0.33.0", features = ["std"] } -#lightning-net-tokio = { version = "0.1.0" } -#lightning-persister = { version = "0.1.0", features = ["tokio"] } -#lightning-background-processor = { version = "0.1.0" } -#lightning-rapid-gossip-sync = { version = "0.1.0" } -#lightning-block-sync = { version = "0.1.0", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { version = "0.1.0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -#lightning-liquidity = { version = "0.1.0", features = ["std"] } -#lightning-macros = { version = "0.1.0" } +lightning = { version = "0.2.0-beta1", features = ["std"] } +lightning-types = { version = "0.3.0-beta1" } +lightning-invoice = { version = "0.34.0-beta1", features = ["std"] } +lightning-net-tokio = { version = "0.2.0-beta1" } 
+lightning-persister = { version = "0.2.0-beta1", features = ["tokio"] } +lightning-background-processor = { version = "0.2.0-beta1" } +lightning-rapid-gossip-sync = { version = "0.2.0-beta1" } +lightning-block-sync = { version = "0.2.0-beta1", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { version = "0.2.0-beta1", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { version = "0.2.0-beta1", features = ["std"] } +lightning-macros = { version = "0.2.0-beta1" } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } @@ -52,17 +52,17 @@ default = [] #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } 
-lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } +#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } +#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["tokio"] } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["rest-client", "rpc-client", "tokio"] } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } +#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning = { path = "../rust-lightning/lightning", features = ["std"] } #lightning-types = { path = "../rust-lightning/lightning-types" } @@ -107,9 +107,9 @@ prost = { version = "0.11.6", default-features = false} winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -#lightning = { version = "0.1.0", features = ["std", "_test_utils"] } +lightning = { version = "0.2.0-beta1", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std", "_test_utils"] } +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" From 4419953a71f0d7f8ffcb2ba48a82a0624763fd98 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 14 Oct 2025 11:12:24 +0200 Subject: [PATCH 110/184] Switch to use Rust VSS server in CI Since we just deprecated the Java version of the VSS server, we here switch our CI over to use the Rust version. 
--- .github/workflows/vss-integration.yml | 51 +++------------------------ 1 file changed, 5 insertions(+), 46 deletions(-) diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index f7a230780..81b63fdf9 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -18,7 +18,7 @@ jobs: env: POSTGRES_DB: postgres POSTGRES_USER: postgres - POSTGRES_PASSWORD: YOU_MUST_CHANGE_THIS_PASSWORD + POSTGRES_PASSWORD: postgres options: >- --health-cmd pg_isready --health-interval 10s @@ -36,54 +36,13 @@ jobs: repository: lightningdevkit/vss-server path: vss-server - - name: Set up Java - uses: actions/setup-java@v3 - with: - distribution: 'corretto' - java-version: '17' - - - name: Start Tomcat + - name: Build and Deploy VSS Server run: | - docker run -d --network=host --name tomcat tomcat:latest - - - name: Setup Gradle - uses: gradle/gradle-build-action@v2 - with: - gradle-version: release-candidate - - - name: Create database table - run: | - psql -h localhost -U postgres -d postgres -f ./vss-server/java/app/src/main/java/org/vss/impl/postgres/sql/v0_create_vss_db.sql - env: - PGPASSWORD: YOU_MUST_CHANGE_THIS_PASSWORD - - - name: Build and Deploy VSS - run: | - # Print Info - java -version - gradle --version - - GRADLE_VERSION=$(gradle --version | awk '/^Gradle/ {print $2}' | head -1) - if [ -z "$GRADLE_VERSION" ]; then - echo "Error: Failed to extract Gradle version." >&2 - exit 1 - fi - echo "Extracted Gradle Version: $GRADLE_VERSION" - - cd vss-server/java - gradle wrapper --gradle-version $GRADLE_VERSION - ./gradlew --version - ./gradlew build - - docker cp app/build/libs/vss-1.0.war tomcat:/usr/local/tomcat/webapps/vss.war - cd ../ - - name: Run VSS Integration tests against vss-instance. 
+ cd vss-server/rust + cargo run server/vss-server-config.toml& + - name: Run VSS Integration tests run: | cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" RUSTFLAGS="--cfg vss_test" cargo build --verbose --color always RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss - - - name: Cleanup - run: | - docker stop tomcat && docker rm tomcat From b3e89c683a613153bf384cfa34163a979efc8c17 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 16 Oct 2025 09:58:48 +0200 Subject: [PATCH 111/184] Remove erroneous `debug_assert` when handling `PaymentForwarded` Recent LDK changes made `skimmed_fee_msat` a required field of the LSPS service handler's `payment_forwarded` API, which seemed reasonable given that the field is available since LDK 0.0.122. However, when updating LDK Node we introduced a `debug_assert` that checked the field to be *always* set, which is wrong, as it's only set post-0.0.122 *if* there was some fee skimmed. Here we fix this oversight. --- src/event.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/event.rs b/src/event.rs index 79fc5e5db..db6ef13f1 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1289,16 +1289,10 @@ where } if let Some(liquidity_source) = self.liquidity_source.as_ref() { - if let Some(skimmed_fee_msat) = skimmed_fee_msat { - liquidity_source - .handle_payment_forwarded(next_channel_id, skimmed_fee_msat) - .await; - } else { - debug_assert!( - false, - "We expect skimmed_fee_msat to be set since LDK 0.0.122" - ); - } + let skimmed_fee_msat = skimmed_fee_msat.unwrap_or(0); + liquidity_source + .handle_payment_forwarded(next_channel_id, skimmed_fee_msat) + .await; } let event = Event::PaymentForwarded { From fc6a7ff1fe1a8a8d756e38659f740e36b43c3172 Mon Sep 17 00:00:00 2001 From: Artur Gontijo Date: Sat, 1 Feb 2025 12:25:54 -0300 Subject: [PATCH 112/184] Use `MonitorUpdatingPersister` We use LDK's `MonitorUpdatingPersister` to persist (and read) differential updates, 
instead of always repersisting the full monitors. --- src/builder.rs | 48 ++++++++------- src/io/test_utils.rs | 139 ++++++++++++++++++++++++++++--------------- src/types.rs | 13 +++- 3 files changed, 128 insertions(+), 72 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index b64fc23a0..b4a146e7c 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -31,8 +31,8 @@ use lightning::routing::scoring::{ }; use lightning::sign::{EntropySource, NodeSigner}; use lightning::util::persist::{ - read_channel_monitors, KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::ser::ReadableArgs; use lightning::util::sweep::OutputSweeper; @@ -66,7 +66,7 @@ use crate::runtime::Runtime; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter, - OnionMessenger, PaymentStore, PeerManager, + OnionMessenger, PaymentStore, PeerManager, Persister, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; @@ -75,6 +75,7 @@ use crate::{Node, NodeMetrics}; const VSS_HARDENED_CHILD_INDEX: u32 = 877; const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138; const LSPS_HARDENED_CHILD_INDEX: u32 = 577; +const PERSISTER_MAX_PENDING_UPDATES: u64 = 100; #[derive(Debug, Clone)] enum ChainDataSourceConfig { @@ -1317,6 +1318,28 @@ fn build_with_store_internal( )); let peer_storage_key = keys_manager.get_peer_storage_key(); + let persister = Arc::new(Persister::new( + Arc::clone(&kv_store), + Arc::clone(&logger), + PERSISTER_MAX_PENDING_UPDATES, + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + )); + + // Read ChannelMonitor state from store + let 
channel_monitors = match persister.read_all_channel_monitors_with_updates() { + Ok(monitors) => monitors, + Err(e) => { + if e.kind() == lightning::io::ErrorKind::NotFound { + Vec::new() + } else { + log_error!(logger, "Failed to read channel monitors: {}", e.to_string()); + return Err(BuildError::ReadFailed); + } + }, + }; // Initialize the ChainMonitor let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( @@ -1324,7 +1347,7 @@ fn build_with_store_internal( Arc::clone(&tx_broadcaster), Arc::clone(&logger), Arc::clone(&fee_estimator), - Arc::clone(&kv_store), + Arc::clone(&persister), Arc::clone(&keys_manager), peer_storage_key, )); @@ -1371,23 +1394,6 @@ fn build_with_store_internal( scoring_fee_params, )); - // Read ChannelMonitor state from store - let channel_monitors = match read_channel_monitors( - Arc::clone(&kv_store), - Arc::clone(&keys_manager), - Arc::clone(&keys_manager), - ) { - Ok(monitors) => monitors, - Err(e) => { - if e.kind() == lightning::io::ErrorKind::NotFound { - Vec::new() - } else { - log_error!(logger, "Failed to read channel monitors: {}", e.to_string()); - return Err(BuildError::ReadFailed); - } - }, - }; - let mut user_config = default_user_config(&config); if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() { diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 22f1a4ea5..8fbf4279d 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -11,14 +11,28 @@ use std::path::PathBuf; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, - create_network, create_node_cfgs, create_node_chanmgrs, send_payment, + create_network, create_node_cfgs, create_node_chanmgrs, send_payment, TestChanMonCfg, }; -use lightning::util::persist::{read_channel_monitors, KVStoreSync, KVSTORE_NAMESPACE_KEY_MAX_LEN}; +use lightning::util::persist::{ + KVStoreSync, 
MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, +}; + use lightning::util::test_utils; use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; +type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister< + &'a K, + &'a test_utils::TestLogger, + &'a test_utils::TestKeysInterface, + &'a test_utils::TestKeysInterface, + &'a test_utils::TestBroadcaster, + &'a test_utils::TestFeeEstimator, +>; + +const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5; + pub(crate) fn random_storage_path() -> PathBuf { let mut temp_path = std::env::temp_dir(); let mut rng = thread_rng(); @@ -77,27 +91,50 @@ pub(crate) fn do_read_write_remove_list_persist( assert_eq!(listed_keys.len(), 0); } +pub(crate) fn create_persister<'a, K: KVStoreSync + Sync>( + store: &'a K, chanmon_cfg: &'a TestChanMonCfg, max_pending_updates: u64, +) -> TestMonitorUpdatePersister<'a, K> { + MonitorUpdatingPersister::new( + store, + &chanmon_cfg.logger, + max_pending_updates, + &chanmon_cfg.keys_manager, + &chanmon_cfg.keys_manager, + &chanmon_cfg.tx_broadcaster, + &chanmon_cfg.fee_estimator, + ) +} + +pub(crate) fn create_chain_monitor<'a, K: KVStoreSync + Sync>( + chanmon_cfg: &'a TestChanMonCfg, persister: &'a TestMonitorUpdatePersister<'a, K>, +) -> test_utils::TestChainMonitor<'a> { + test_utils::TestChainMonitor::new( + Some(&chanmon_cfg.chain_source), + &chanmon_cfg.tx_broadcaster, + &chanmon_cfg.logger, + &chanmon_cfg.fee_estimator, + persister, + &chanmon_cfg.keys_manager, + ) +} + // Integration-test the given KVStore implementation. Test relaying a few payments and check that // the persisted data is updated the appropriate number of times. pub(crate) fn do_test_store(store_0: &K, store_1: &K) { + // This value is used later to limit how many iterations we perform. + let persister_0_max_pending_updates = 7; + // Intentionally set this to a smaller value to test a different alignment. 
+ let persister_1_max_pending_updates = 3; + let chanmon_cfgs = create_chanmon_cfgs(2); + + let persister_0 = create_persister(store_0, &chanmon_cfgs[0], persister_0_max_pending_updates); + let persister_1 = create_persister(store_1, &chanmon_cfgs[1], persister_1_max_pending_updates); + + let chain_mon_0 = create_chain_monitor(&chanmon_cfgs[0], &persister_0); + let chain_mon_1 = create_chain_monitor(&chanmon_cfgs[1], &persister_1); + let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let chain_mon_0 = test_utils::TestChainMonitor::new( - Some(&chanmon_cfgs[0].chain_source), - &chanmon_cfgs[0].tx_broadcaster, - &chanmon_cfgs[0].logger, - &chanmon_cfgs[0].fee_estimator, - store_0, - node_cfgs[0].keys_manager, - ); - let chain_mon_1 = test_utils::TestChainMonitor::new( - Some(&chanmon_cfgs[1].chain_source), - &chanmon_cfgs[1].tx_broadcaster, - &chanmon_cfgs[1].logger, - &chanmon_cfgs[1].fee_estimator, - store_1, - node_cfgs[1].keys_manager, - ); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -105,26 +142,20 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { // Check that the persisted channel data is empty before any channels are // open. - let mut persisted_chan_data_0 = - read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap(); + let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_0.len(), 0); - let mut persisted_chan_data_1 = - read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap(); + let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_1.len(), 0); // Helper to make sure the channel is on the expected update ID. macro_rules! 
check_persisted_data { ($expected_update_id: expr) => { - persisted_chan_data_0 = - read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager) - .unwrap(); + persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_0.len(), 1); for (_, mon) in persisted_chan_data_0.iter() { assert_eq!(mon.get_latest_update_id(), $expected_update_id); } - persisted_chan_data_1 = - read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager) - .unwrap(); + persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_1.len(), 1); for (_, mon) in persisted_chan_data_1.iter() { assert_eq!(mon.get_latest_update_id(), $expected_update_id); @@ -137,10 +168,29 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { check_persisted_data!(0); // Send a few payments and make sure the monitors are updated to the latest. - send_payment(&nodes[0], &vec![&nodes[1]][..], 8000000); - check_persisted_data!(5); - send_payment(&nodes[1], &vec![&nodes[0]][..], 4000000); - check_persisted_data!(10); + let expected_route = &[&nodes[1]][..]; + send_payment(&nodes[0], expected_route, 8_000_000); + check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT); + let expected_route = &[&nodes[0]][..]; + send_payment(&nodes[1], expected_route, 4_000_000); + check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT); + + // Send a few more payments to try all the alignments of max pending updates with + // updates for a payment sent and received. 
+ let mut sender = 0; + for i in 3..=persister_0_max_pending_updates * 2 { + let receiver; + if sender == 0 { + sender = 1; + receiver = 0; + } else { + sender = 0; + receiver = 1; + } + let expected_route = &[&nodes[receiver]][..]; + send_payment(&nodes[sender], expected_route, 21_000); + check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT); + } // Force close because cooperative close doesn't result in any persisted // updates. @@ -163,27 +213,18 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); + let txn = vec![node_txn[0].clone(), node_txn[0].clone()]; + let dummy_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn); + connect_block(&nodes[1], &dummy_block); - connect_block( - &nodes[1], - &create_dummy_block( - nodes[0].best_block_hash(), - 42, - vec![node_txn[0].clone(), node_txn[0].clone()], - ), - ); check_closed_broadcast!(nodes[1], true); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - [nodes[0].node.get_our_node_id()], - 100000 - ); + let reason = ClosureReason::CommitmentTxConfirmed; + let node_id_0 = nodes[0].node.get_our_node_id(); + check_closed_event!(nodes[1], 1, reason, false, [node_id_0], 100000); check_added_monitors!(nodes[1], 1); // Make sure everything is persisted as expected after close. 
- check_persisted_data!(11); + check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1); } diff --git a/src/types.rs b/src/types.rs index ddd587985..2fc1c6488 100644 --- a/src/types.rs +++ b/src/types.rs @@ -19,7 +19,7 @@ use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::{KVStore, KVStoreSync}; +use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersister}; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; use lightning_block_sync::gossip::{GossipVerifier, UtxoSource}; @@ -49,13 +49,22 @@ where /// A type alias for [`SyncAndAsyncKVStore`] with `Sync`/`Send` markers; pub type DynStore = dyn SyncAndAsyncKVStore + Sync + Send; +pub type Persister = MonitorUpdatingPersister< + Arc, + Arc, + Arc, + Arc, + Arc, + Arc, +>; + pub(crate) type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, Arc, Arc, Arc, Arc, - Arc, + Arc, Arc, >; From 94cc2dca86ea6b96ca12a921af36451175c0984e Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Jan 2025 15:04:33 +0100 Subject: [PATCH 113/184] remove unnecessary arc clone --- src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index c235d2a88..b20101455 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -259,7 +259,6 @@ impl Node { return; } _ = interval.tick() => { - let gossip_sync_logger = Arc::clone(&gossip_sync_logger); let now = Instant::now(); match gossip_source.update_rgs_snapshot().await { Ok(updated_timestamp) => { From e4a0352c6681ecae70491afb48301c2e3b663eb5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 9 Oct 2025 14:11:45 +0200 Subject: [PATCH 114/184] Switch to `nightly` `rustfmt` --- .github/workflows/rust.yml | 1 - rust-toolchain.toml | 4 ++++ rustfmt.toml | 21 ++++++++++++------- 
src/builder.rs | 7 +++---- src/chain/bitcoind.rs | 10 ++++----- src/io/test_utils.rs | 3 +-- src/io/utils.rs | 9 +++++++- src/lib.rs | 16 +++++++------- .../asynchronous/static_invoice_store.rs | 12 ++++------- src/payment/onchain.rs | 2 +- src/payment/store.rs | 2 +- src/wallet/mod.rs | 2 +- tests/common/mod.rs | 14 ++++++------- tests/integration_tests_rust.rs | 16 +++++++------- 14 files changed, 65 insertions(+), 54 deletions(-) create mode 100644 rust-toolchain.toml diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index aff610908..87249bd72 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -38,7 +38,6 @@ jobs: - name: Install Rust ${{ matrix.toolchain }} toolchain run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - rustup override set ${{ matrix.toolchain }} - name: Check formatting on Rust ${{ matrix.toolchain }} if: matrix.check-fmt run: rustup component add rustfmt && cargo fmt --all -- --check diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 000000000..d35f8fac6 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "nightly-2025-10-08" +components = [ "rustfmt" ] +profile = "minimal" diff --git a/rustfmt.toml b/rustfmt.toml index 66161555c..26a260b50 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -4,11 +4,16 @@ hard_tabs = true use_field_init_shorthand = true max_width = 100 match_block_trailing_comma = true -# UNSTABLE: format_code_in_doc_comments = true -# UNSTABLE: overflow_delimited_expr = true -# UNSTABLE: comment_width = 100 -# UNSTABLE: format_macro_matchers = true -# UNSTABLE: format_strings = true -# UNSTABLE: group_imports = "StdExternalCrate" -# UNSTABLE: reorder_imports = true -# UNSTABLE: imports_granularity = "Module" +format_code_in_doc_comments = true +overflow_delimited_expr = true +comment_width = 100 +format_macro_matchers = true +group_imports 
= "StdExternalCrate" +reorder_imports = true +imports_granularity = "Module" +normalize_comments = true +normalize_doc_attributes = true +style_edition = "2021" +# TBD: do we want comment and string wrapping? +#wrap_comments = true +#format_strings = true diff --git a/src/builder.rs b/src/builder.rs index b4a146e7c..0c843447a 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -628,10 +628,9 @@ impl NodeBuilder { derive_xprv(config, &seed_bytes, VSS_HARDENED_CHILD_INDEX, Arc::clone(&logger))?; let lnurl_auth_xprv = vss_xprv - .derive_priv( - &Secp256k1::new(), - &[ChildNumber::Hardened { index: VSS_LNURL_AUTH_HARDENED_CHILD_INDEX }], - ) + .derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { + index: VSS_LNURL_AUTH_HARDENED_CHILD_INDEX, + }]) .map_err(|e| { log_error!(logger, "Failed to derive VSS secret: {}", e); BuildError::KVStoreSetupFailed diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index e97546e88..934e4aabf 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -415,7 +415,7 @@ impl BitcoindChainSource { pub(super) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { macro_rules! 
get_fee_rate_update { - ($estimation_fut: expr) => {{ + ($estimation_fut:expr) => {{ let update_res = tokio::time::timeout( Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), $estimation_fut, @@ -701,10 +701,10 @@ impl BitcoindClient { let num_blocks_json = serde_json::json!(num_blocks); let estimation_mode_json = serde_json::json!(estimation_mode); rpc_client - .call_method::( - "estimatesmartfee", - &[num_blocks_json, estimation_mode_json], - ) + .call_method::("estimatesmartfee", &[ + num_blocks_json, + estimation_mode_json, + ]) .await .map(|resp| resp.0) } diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 8fbf4279d..59ad09458 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -16,7 +16,6 @@ use lightning::ln::functional_test_utils::{ use lightning::util::persist::{ KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; - use lightning::util::test_utils; use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; use rand::distributions::Alphanumeric; @@ -149,7 +148,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { // Helper to make sure the channel is on the expected update ID. macro_rules! check_persisted_data { - ($expected_update_id: expr) => { + ($expected_update_id:expr) => { persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap(); assert_eq!(persisted_chan_data_0.len(), 1); for (_, mon) in persisted_chan_data_0.iter() { diff --git a/src/io/utils.rs b/src/io/utils.rs index 1556314c4..7b7906d4f 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -399,7 +399,14 @@ pub(crate) fn check_namespace_key_validity( } macro_rules! 
impl_read_write_change_set_type { - ( $read_name: ident, $write_name: ident, $change_set_type:ty, $primary_namespace: expr, $secondary_namespace: expr, $key: expr ) => { + ( + $read_name:ident, + $write_name:ident, + $change_set_type:ty, + $primary_namespace:expr, + $secondary_namespace:expr, + $key:expr + ) => { pub(crate) fn $read_name( kv_store: Arc, logger: L, ) -> Result, std::io::Error> diff --git a/src/lib.rs b/src/lib.rs index c235d2a88..f07b2def3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,18 +25,21 @@ //! ```no_run //! # #[cfg(not(feature = "uniffi"))] //! # { -//! use ldk_node::Builder; -//! use ldk_node::lightning_invoice::Bolt11Invoice; -//! use ldk_node::lightning::ln::msgs::SocketAddress; -//! use ldk_node::bitcoin::Network; -//! use ldk_node::bitcoin::secp256k1::PublicKey; //! use std::str::FromStr; //! +//! use ldk_node::bitcoin::secp256k1::PublicKey; +//! use ldk_node::bitcoin::Network; +//! use ldk_node::lightning::ln::msgs::SocketAddress; +//! use ldk_node::lightning_invoice::Bolt11Invoice; +//! use ldk_node::Builder; +//! //! fn main() { //! let mut builder = Builder::new(); //! builder.set_network(Network::Testnet); //! builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); -//! builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string()); +//! builder.set_gossip_source_rgs( +//! "https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string(), +//! ); //! //! let node = builder.build().unwrap(); //! @@ -67,7 +70,6 @@ //! [`stop`]: Node::stop //! [`open_channel`]: Node::open_channel //! [`send`]: Bolt11Payment::send -//! 
#![cfg_attr(not(feature = "uniffi"), deny(missing_docs))] #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index a7e2d2f9e..7fec4fe9a 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -271,14 +271,10 @@ mod tests { } fn blinded_path() -> BlindedMessagePath { - BlindedMessagePath::from_blinded_path( - pubkey(40), - pubkey(41), - vec![ - BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] }, - BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 44] }, - ], - ) + BlindedMessagePath::from_blinded_path(pubkey(40), pubkey(41), vec![ + BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] }, + BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 44] }, + ]) } fn pubkey(byte: u8) -> PublicKey { diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index c5100d772..695f96d43 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -23,7 +23,7 @@ type FeeRate = bitcoin::FeeRate; type FeeRate = Arc; macro_rules! maybe_map_fee_rate_opt { - ($fee_rate_opt: expr) => {{ + ($fee_rate_opt:expr) => {{ #[cfg(not(feature = "uniffi"))] { $fee_rate_opt diff --git a/src/payment/store.rs b/src/payment/store.rs index b17898d9c..184de2ea9 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -179,7 +179,7 @@ impl StorableObject for PaymentDetails { let mut updated = false; macro_rules! 
update_if_necessary { - ($val: expr, $update: expr) => { + ($val:expr, $update:expr) => { if $val != $update { $val = $update; updated = true; diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 8f8151b9c..0f3797431 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -631,7 +631,7 @@ impl Wallet { script_pubkey: ScriptBuf::new_witness_program(&witness_program), }, satisfaction_weight: 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + - 1 /* witness items */ + 1 /* schnorr sig len */ + 64, /* schnorr sig */ + 1 /* witness items */ + 1 /* schnorr sig len */ + 64, // schnorr sig }; utxos.push(utxo); }, diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3ac0e8432..4d02895c7 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -48,7 +48,7 @@ use rand::{thread_rng, Rng}; use serde_json::{json, Value}; macro_rules! expect_event { - ($node: expr, $event_type: ident) => {{ + ($node:expr, $event_type:ident) => {{ match $node.wait_next_event() { ref e @ Event::$event_type { .. } => { println!("{} got event {:?}", $node.node_id(), e); @@ -64,7 +64,7 @@ macro_rules! expect_event { pub(crate) use expect_event; macro_rules! expect_channel_pending_event { - ($node: expr, $counterparty_node_id: expr) => {{ + ($node:expr, $counterparty_node_id:expr) => {{ match $node.wait_next_event() { ref e @ Event::ChannelPending { funding_txo, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); @@ -82,7 +82,7 @@ macro_rules! expect_channel_pending_event { pub(crate) use expect_channel_pending_event; macro_rules! expect_channel_ready_event { - ($node: expr, $counterparty_node_id: expr) => {{ + ($node:expr, $counterparty_node_id:expr) => {{ match $node.wait_next_event() { ref e @ Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); @@ -100,7 +100,7 @@ macro_rules! expect_channel_ready_event { pub(crate) use expect_channel_ready_event; macro_rules! 
expect_payment_received_event { - ($node: expr, $amount_msat: expr) => {{ + ($node:expr, $amount_msat:expr) => {{ match $node.wait_next_event() { ref e @ Event::PaymentReceived { payment_id, amount_msat, .. } => { println!("{} got event {:?}", $node.node_id(), e); @@ -122,7 +122,7 @@ macro_rules! expect_payment_received_event { pub(crate) use expect_payment_received_event; macro_rules! expect_payment_claimable_event { - ($node: expr, $payment_id: expr, $payment_hash: expr, $claimable_amount_msat: expr) => {{ + ($node:expr, $payment_id:expr, $payment_hash:expr, $claimable_amount_msat:expr) => {{ match $node.wait_next_event() { ref e @ Event::PaymentClaimable { payment_id, @@ -147,7 +147,7 @@ macro_rules! expect_payment_claimable_event { pub(crate) use expect_payment_claimable_event; macro_rules! expect_payment_successful_event { - ($node: expr, $payment_id: expr, $fee_paid_msat: expr) => {{ + ($node:expr, $payment_id:expr, $fee_paid_msat:expr) => {{ match $node.wait_next_event() { ref e @ Event::PaymentSuccessful { payment_id, fee_paid_msat, .. } => { println!("{} got event {:?}", $node.node_id(), e); @@ -269,7 +269,7 @@ pub(crate) struct TestConfig { } macro_rules! setup_builder { - ($builder: ident, $config: expr) => { + ($builder:ident, $config:expr) => { #[cfg(feature = "uniffi")] let $builder = Builder::from_config($config.clone()); #[cfg(not(feature = "uniffi"))] diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 64a78e11b..804bba876 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -687,7 +687,7 @@ fn run_rbf_test(is_insert_block: bool) { let chain_source_esplora = TestChainSource::Esplora(&electrsd); macro_rules! 
config_node { - ($chain_source: expr, $anchor_channels: expr) => {{ + ($chain_source:expr, $anchor_channels:expr) => {{ let config_a = random_config($anchor_channels); let node = setup_node(&$chain_source, config_a, None); node @@ -719,7 +719,7 @@ fn run_rbf_test(is_insert_block: bool) { }; } macro_rules! validate_balances { - ($expected_balance_sat: expr, $is_spendable: expr) => { + ($expected_balance_sat:expr, $is_spendable:expr) => { let spend_balance = if $is_spendable { $expected_balance_sat } else { 0 }; for node in &nodes { node.sync_wallets().unwrap(); @@ -972,8 +972,8 @@ fn simple_bolt12_send_receive() { assert_eq!(offer_id, offer.id()); assert_eq!(&expected_quantity, qty); assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. + // TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + // API currently doesn't allow to do that. }, _ => { panic!("Unexpected payment kind"); @@ -1038,8 +1038,8 @@ fn simple_bolt12_send_receive() { assert_eq!(offer_id, offer.id()); assert_eq!(&expected_quantity, qty); assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. + // TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + // API currently doesn't allow to do that. hash.unwrap() }, _ => { @@ -1104,8 +1104,8 @@ fn simple_bolt12_send_receive() { assert!(preimage.is_some()); assert_eq!(&expected_quantity, qty); assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. 
+ // TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + // API currently doesn't allow to do that. }, _ => { panic!("Unexpected payment kind"); From 9272f3176bd82118ca9fdd1cbd6ace0dc155906a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 17 Oct 2025 12:28:31 +0200 Subject: [PATCH 115/184] Delete `rust-toolchain.toml` afterall We had overlooked that the set toolchain would apply to all components not only the ones listed in `components`. --- rust-toolchain.toml | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 rust-toolchain.toml diff --git a/rust-toolchain.toml b/rust-toolchain.toml deleted file mode 100644 index d35f8fac6..000000000 --- a/rust-toolchain.toml +++ /dev/null @@ -1,4 +0,0 @@ -[toolchain] -channel = "nightly-2025-10-08" -components = [ "rustfmt" ] -profile = "minimal" From e9fe0b3fc8ad65a57c2d15ce785b16d5a597b2ad Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 17 Oct 2025 12:30:37 +0200 Subject: [PATCH 116/184] Disable `overflow_delimited_expr` to ensure stability .. disabling this option avoids `cargo +1.85 fmt` making any changes if run after `cargo +nightly fmt`. --- rustfmt.toml | 2 +- src/builder.rs | 7 ++++--- src/chain/bitcoind.rs | 8 ++++---- src/payment/asynchronous/static_invoice_store.rs | 12 ++++++++---- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index 26a260b50..4900e142f 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -5,7 +5,6 @@ use_field_init_shorthand = true max_width = 100 match_block_trailing_comma = true format_code_in_doc_comments = true -overflow_delimited_expr = true comment_width = 100 format_macro_matchers = true group_imports = "StdExternalCrate" @@ -17,3 +16,4 @@ style_edition = "2021" # TBD: do we want comment and string wrapping? 
#wrap_comments = true #format_strings = true +#overflow_delimited_expr = true diff --git a/src/builder.rs b/src/builder.rs index 0c843447a..b4a146e7c 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -628,9 +628,10 @@ impl NodeBuilder { derive_xprv(config, &seed_bytes, VSS_HARDENED_CHILD_INDEX, Arc::clone(&logger))?; let lnurl_auth_xprv = vss_xprv - .derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { - index: VSS_LNURL_AUTH_HARDENED_CHILD_INDEX, - }]) + .derive_priv( + &Secp256k1::new(), + &[ChildNumber::Hardened { index: VSS_LNURL_AUTH_HARDENED_CHILD_INDEX }], + ) .map_err(|e| { log_error!(logger, "Failed to derive VSS secret: {}", e); BuildError::KVStoreSetupFailed diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 934e4aabf..a0151e5a2 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -701,10 +701,10 @@ impl BitcoindClient { let num_blocks_json = serde_json::json!(num_blocks); let estimation_mode_json = serde_json::json!(estimation_mode); rpc_client - .call_method::("estimatesmartfee", &[ - num_blocks_json, - estimation_mode_json, - ]) + .call_method::( + "estimatesmartfee", + &[num_blocks_json, estimation_mode_json], + ) .await .map(|resp| resp.0) } diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index 7fec4fe9a..a7e2d2f9e 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -271,10 +271,14 @@ mod tests { } fn blinded_path() -> BlindedMessagePath { - BlindedMessagePath::from_blinded_path(pubkey(40), pubkey(41), vec![ - BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] }, - BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 44] }, - ]) + BlindedMessagePath::from_blinded_path( + pubkey(40), + pubkey(41), + vec![ + BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] }, + BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: 
vec![0; 44] }, + ], + ) } fn pubkey(byte: u8) -> PublicKey { From a73d0900d054637732598f82bf848daa34263e0d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 17 Oct 2025 12:32:15 +0200 Subject: [PATCH 117/184] Set `rust-version` field in `Cargo.toml` .. we indicate our MSRV in `Cargo.toml`. --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index 7b888c929..a70e74dd4 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" edition = "2021" +rust-version = "1.85" description = "A ready-to-go node implementation built using LDK." repository = "https://github.com/lightningdevkit/ldk-node/" readme = "README.md" From 2590ee5d09fc8ae327043df42d3df7840f77c784 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 17 Oct 2025 12:34:31 +0200 Subject: [PATCH 118/184] Add weekly cronjob running `cargo +nightly fmt` .. which we steal from `rust-bitcoin`. --- .github/workflows/cron-weekly-rustfmt.yml | 28 +++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/cron-weekly-rustfmt.yml diff --git a/.github/workflows/cron-weekly-rustfmt.yml b/.github/workflows/cron-weekly-rustfmt.yml new file mode 100644 index 000000000..626953c8e --- /dev/null +++ b/.github/workflows/cron-weekly-rustfmt.yml @@ -0,0 +1,28 @@ +name: Nightly rustfmt +on: + schedule: + - cron: "0 0 * * 0" # runs weekly on Sunday at 00:00 + workflow_dispatch: # allows manual triggering +jobs: + format: + name: Nightly rustfmt + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: rustfmt + - name: Run Nightly rustfmt + # Run the formatter and manually remove trailing whitespace. 
+ run: cargo +nightly fmt && git ls-files -- '*.rs' -z | xargs sed -E -i'' -e 's/[[:space:]]+$//' + - name: Get the current date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_ENV + - name: Create Pull Request + uses: peter-evans/create-pull-request@v7 + with: + author: Fmt Bot + title: Automated nightly rustfmt (${{ env.date }}) + body: | + Automated nightly `rustfmt` changes by [create-pull-request](https://github.com/peter-evans/create-pull-request) GitHub action + commit-message: ${{ env.date }} automated rustfmt nightly + labels: rustfmt From edff34ce6454654635f519f5e0bc68ca93c8ccbb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 14 Oct 2025 11:20:11 +0200 Subject: [PATCH 119/184] Drop superfluous `cargo build` step .. as we're about to `cargo test` anyways. --- .github/workflows/vss-integration.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index 81b63fdf9..5f6e6065b 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -44,5 +44,4 @@ jobs: run: | cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" - RUSTFLAGS="--cfg vss_test" cargo build --verbose --color always RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss From ae98befd9c66c0f9086a203efb7bdd0eab0892cf Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 14 Oct 2025 13:03:05 +0200 Subject: [PATCH 120/184] Add missing error logs for `ReadFailed` cases .. some of the `ReadFailed` cases didn't log why they failed. Here we fix that oversight. 
--- src/builder.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 0c843447a..195ac65c3 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1138,6 +1138,7 @@ fn build_with_store_internal( if e.kind() == std::io::ErrorKind::NotFound { Arc::new(RwLock::new(NodeMetrics::default())) } else { + log_error!(logger, "Failed to read node metrics from store: {}", e); return Err(BuildError::ReadFailed); } }, @@ -1201,7 +1202,8 @@ fn build_with_store_internal( Arc::clone(&kv_store), Arc::clone(&logger), )), - Err(_) => { + Err(e) => { + log_error!(logger, "Failed to read payment data from store: {}", e); return Err(BuildError::ReadFailed); }, }; @@ -1334,7 +1336,7 @@ fn build_with_store_internal( if e.kind() == lightning::io::ErrorKind::NotFound { Vec::new() } else { - log_error!(logger, "Failed to read channel monitors: {}", e.to_string()); + log_error!(logger, "Failed to read channel monitors from store: {}", e.to_string()); return Err(BuildError::ReadFailed); } }, @@ -1359,6 +1361,7 @@ fn build_with_store_internal( if e.kind() == std::io::ErrorKind::NotFound { Arc::new(Graph::new(config.network.into(), Arc::clone(&logger))) } else { + log_error!(logger, "Failed to read network graph from store: {}", e); return Err(BuildError::ReadFailed); } }, @@ -1379,6 +1382,7 @@ fn build_with_store_internal( Arc::clone(&logger), ))) } else { + log_error!(logger, "Failed to read scoring data from store: {}", e); return Err(BuildError::ReadFailed); } }, @@ -1448,7 +1452,7 @@ fn build_with_store_internal( ); let (_hash, channel_manager) = <(BlockHash, ChannelManager)>::read(&mut reader, read_args).map_err(|e| { - log_error!(logger, "Failed to read channel manager from KVStore: {}", e); + log_error!(logger, "Failed to read channel manager from store: {}", e); BuildError::ReadFailed })?; channel_manager @@ -1677,6 +1681,7 @@ fn build_with_store_internal( Arc::clone(&logger), )) } else { + log_error!(logger, "Failed to 
read output sweeper data from store: {}", e); return Err(BuildError::ReadFailed); } }, @@ -1689,6 +1694,7 @@ fn build_with_store_internal( if e.kind() == std::io::ErrorKind::NotFound { Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger))) } else { + log_error!(logger, "Failed to read event queue from store: {}", e); return Err(BuildError::ReadFailed); } }, @@ -1700,6 +1706,7 @@ fn build_with_store_internal( if e.kind() == std::io::ErrorKind::NotFound { Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger))) } else { + log_error!(logger, "Failed to read peer data from store: {}", e); return Err(BuildError::ReadFailed); } }, From 49502cff8663b023d4f7ed5c0cf94e6330a24f2d Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Jan 2025 14:10:03 +0100 Subject: [PATCH 121/184] add functionality to periodically update routing scores from an external http source --- bindings/ldk_node.udl | 2 + src/builder.rs | 44 ++++++++++++++---- src/config.rs | 6 +++ src/lib.rs | 24 ++++++++++ src/scoring.rs | 104 ++++++++++++++++++++++++++++++++++++++++++ src/types.rs | 5 +- 6 files changed, 175 insertions(+), 10 deletions(-) create mode 100644 src/scoring.rs diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index bd1e4fc43..9da0d89b6 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -81,6 +81,7 @@ interface Builder { void set_chain_source_bitcoind_rest(string rest_host, u16 rest_port, string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); void set_gossip_source_p2p(); void set_gossip_source_rgs(string rgs_server_url); + void set_pathfinding_scores_source(string url); void set_liquidity_source_lsps1(PublicKey node_id, SocketAddress address, string? token); void set_liquidity_source_lsps2(PublicKey node_id, SocketAddress address, string? token); void set_storage_dir_path(string storage_dir_path); @@ -330,6 +331,7 @@ dictionary NodeStatus { u64? latest_onchain_wallet_sync_timestamp; u64? 
latest_fee_rate_cache_update_timestamp; u64? latest_rgs_snapshot_timestamp; + u64? latest_pathfinding_scores_sync_timestamp; u64? latest_node_announcement_broadcast_timestamp; u32? latest_channel_monitor_archival_height; }; diff --git a/src/builder.rs b/src/builder.rs index b4a146e7c..af2d92463 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -27,7 +27,8 @@ use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; use lightning::routing::gossip::NodeAlias; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ - ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, + CombinedScorer, ProbabilisticScorer, ProbabilisticScoringDecayParameters, + ProbabilisticScoringFeeParameters, }; use lightning::sign::{EntropySource, NodeSigner}; use lightning::util::persist::{ @@ -110,6 +111,11 @@ enum GossipSourceConfig { RapidGossipSync(String), } +#[derive(Debug, Clone)] +struct PathfindingScoresSyncConfig { + url: String, +} + #[derive(Debug, Clone, Default)] struct LiquiditySourceConfig { // Act as an LSPS1 client connecting to the given service. @@ -243,6 +249,7 @@ pub struct NodeBuilder { log_writer_config: Option, async_payments_role: Option, runtime_handle: Option, + pathfinding_scores_sync_config: Option, } impl NodeBuilder { @@ -260,6 +267,7 @@ impl NodeBuilder { let liquidity_source_config = None; let log_writer_config = None; let runtime_handle = None; + let pathfinding_scores_sync_config = None; Self { config, entropy_source_config, @@ -269,6 +277,7 @@ impl NodeBuilder { log_writer_config, runtime_handle, async_payments_role: None, + pathfinding_scores_sync_config, } } @@ -411,6 +420,14 @@ impl NodeBuilder { self } + /// Configures the [`Node`] instance to source its external scores from the given URL. + /// + /// The external scores are merged into the local scoring system to improve routing. 
+ pub fn set_pathfinding_scores_source(&mut self, url: String) -> &mut Self { + self.pathfinding_scores_sync_config = Some(PathfindingScoresSyncConfig { url }); + self + } + /// Configures the [`Node`] instance to source inbound liquidity from the given /// [bLIP-51 / LSPS1] service. /// @@ -718,6 +735,7 @@ impl NodeBuilder { self.chain_data_source_config.as_ref(), self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), + self.pathfinding_scores_sync_config.as_ref(), self.async_payments_role, seed_bytes, runtime, @@ -751,6 +769,7 @@ impl NodeBuilder { self.chain_data_source_config.as_ref(), self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), + self.pathfinding_scores_sync_config.as_ref(), self.async_payments_role, seed_bytes, runtime, @@ -910,6 +929,13 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_gossip_source_rgs(rgs_server_url); } + /// Configures the [`Node`] instance to source its external scores from the given URL. + /// + /// The external scores are merged into the local scoring system to improve routing. + pub fn set_pathfinding_scores_source(&self, url: String) { + self.inner.write().unwrap().set_pathfinding_scores_source(url); + } + /// Configures the [`Node`] instance to source inbound liquidity from the given /// [bLIP-51 / LSPS1] service. 
/// @@ -1110,6 +1136,7 @@ fn build_with_store_internal( config: Arc, chain_data_source_config: Option<&ChainDataSourceConfig>, gossip_source_config: Option<&GossipSourceConfig>, liquidity_source_config: Option<&LiquiditySourceConfig>, + pathfinding_scores_sync_config: Option<&PathfindingScoresSyncConfig>, async_payments_role: Option, seed_bytes: [u8; 64], runtime: Arc, logger: Arc, kv_store: Arc, ) -> Result { @@ -1365,26 +1392,24 @@ fn build_with_store_internal( }, }; - let scorer = match io::utils::read_scorer( + let local_scorer = match io::utils::read_scorer( Arc::clone(&kv_store), Arc::clone(&network_graph), Arc::clone(&logger), ) { - Ok(scorer) => Arc::new(Mutex::new(scorer)), + Ok(scorer) => scorer, Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { let params = ProbabilisticScoringDecayParameters::default(); - Arc::new(Mutex::new(ProbabilisticScorer::new( - params, - Arc::clone(&network_graph), - Arc::clone(&logger), - ))) + ProbabilisticScorer::new(params, Arc::clone(&network_graph), Arc::clone(&logger)) } else { return Err(BuildError::ReadFailed); } }, }; + let scorer = Arc::new(Mutex::new(CombinedScorer::new(local_scorer))); + let scoring_fee_params = ProbabilisticScoringFeeParameters::default(); let router = Arc::new(DefaultRouter::new( Arc::clone(&network_graph), @@ -1716,6 +1741,8 @@ fn build_with_store_internal( let (background_processor_stop_sender, _) = tokio::sync::watch::channel(()); let is_running = Arc::new(RwLock::new(false)); + let pathfinding_scores_sync_url = pathfinding_scores_sync_config.map(|c| c.url.clone()); + Ok(Node { runtime, stop_sender, @@ -1734,6 +1761,7 @@ fn build_with_store_internal( keys_manager, network_graph, gossip_source, + pathfinding_scores_sync_url, liquidity_source, kv_store, logger, diff --git a/src/config.rs b/src/config.rs index d221dd6c3..ce361c45a 100644 --- a/src/config.rs +++ b/src/config.rs @@ -63,6 +63,9 @@ pub(crate) const PEER_RECONNECTION_INTERVAL: Duration = Duration::from_secs(60); // The time 
in-between RGS sync attempts. pub(crate) const RGS_SYNC_INTERVAL: Duration = Duration::from_secs(60 * 60); +// The time in-between external scores sync attempts. +pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL: Duration = Duration::from_secs(60 * 60); + // The time in-between node announcement broadcast attempts. pub(crate) const NODE_ANN_BCAST_INTERVAL: Duration = Duration::from_secs(60 * 60); @@ -93,6 +96,9 @@ pub(crate) const RGS_SYNC_TIMEOUT_SECS: u64 = 5; /// The length in bytes of our wallets' keys seed. pub const WALLET_KEYS_SEED_LEN: usize = 64; +// The timeout after which we abort an external scores sync operation. +pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS: u64 = 5; + #[derive(Debug, Clone)] /// Represents the configuration of an [`Node`] instance. /// diff --git a/src/lib.rs b/src/lib.rs index b20101455..253a584f9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -95,6 +95,7 @@ mod message_handler; pub mod payment; mod peer_store; mod runtime; +mod scoring; mod tx_broadcaster; mod types; mod wallet; @@ -104,6 +105,7 @@ use std::net::ToSocketAddrs; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use crate::scoring::setup_background_pathfinding_scores_sync; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; #[cfg(feature = "uniffi")] @@ -154,6 +156,7 @@ use types::{ pub use types::{ ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, }; + pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, vss_client, }; @@ -183,6 +186,7 @@ pub struct Node { keys_manager: Arc, network_graph: Arc, gossip_source: Arc, + pathfinding_scores_sync_url: Option, liquidity_source: Option>>>, kv_store: Arc, logger: Arc, @@ -290,6 +294,18 @@ impl Node { }); } + if let Some(pathfinding_scores_sync_url) = self.pathfinding_scores_sync_url.as_ref() { + 
setup_background_pathfinding_scores_sync( + pathfinding_scores_sync_url.clone(), + Arc::clone(&self.scorer), + Arc::clone(&self.node_metrics), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.runtime), + self.stop_sender.subscribe(), + ); + } + if let Some(listening_addresses) = &self.config.listening_addresses { // Setup networking let peer_manager_connection_handler = Arc::clone(&self.peer_manager); @@ -691,6 +707,8 @@ impl Node { locked_node_metrics.latest_fee_rate_cache_update_timestamp; let latest_rgs_snapshot_timestamp = locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64); + let latest_pathfinding_scores_sync_timestamp = + locked_node_metrics.latest_pathfinding_scores_sync_timestamp; let latest_node_announcement_broadcast_timestamp = locked_node_metrics.latest_node_announcement_broadcast_timestamp; let latest_channel_monitor_archival_height = @@ -703,6 +721,7 @@ impl Node { latest_onchain_wallet_sync_timestamp, latest_fee_rate_cache_update_timestamp, latest_rgs_snapshot_timestamp, + latest_pathfinding_scores_sync_timestamp, latest_node_announcement_broadcast_timestamp, latest_channel_monitor_archival_height, } @@ -1530,6 +1549,8 @@ pub struct NodeStatus { /// /// Will be `None` if RGS isn't configured or the snapshot hasn't been updated yet. pub latest_rgs_snapshot_timestamp: Option, + /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully merged external scores. + pub latest_pathfinding_scores_sync_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when we last broadcasted a node /// announcement. 
/// @@ -1548,6 +1569,7 @@ pub(crate) struct NodeMetrics { latest_onchain_wallet_sync_timestamp: Option, latest_fee_rate_cache_update_timestamp: Option, latest_rgs_snapshot_timestamp: Option, + latest_pathfinding_scores_sync_timestamp: Option, latest_node_announcement_broadcast_timestamp: Option, latest_channel_monitor_archival_height: Option, } @@ -1559,6 +1581,7 @@ impl Default for NodeMetrics { latest_onchain_wallet_sync_timestamp: None, latest_fee_rate_cache_update_timestamp: None, latest_rgs_snapshot_timestamp: None, + latest_pathfinding_scores_sync_timestamp: None, latest_node_announcement_broadcast_timestamp: None, latest_channel_monitor_archival_height: None, } @@ -1567,6 +1590,7 @@ impl Default for NodeMetrics { impl_writeable_tlv_based!(NodeMetrics, { (0, latest_lightning_wallet_sync_timestamp, option), + (1, latest_pathfinding_scores_sync_timestamp, option), (2, latest_onchain_wallet_sync_timestamp, option), (4, latest_fee_rate_cache_update_timestamp, option), (6, latest_rgs_snapshot_timestamp, option), diff --git a/src/scoring.rs b/src/scoring.rs new file mode 100644 index 000000000..e244ab258 --- /dev/null +++ b/src/scoring.rs @@ -0,0 +1,104 @@ +use std::{ + io::Cursor, + sync::{Arc, Mutex, RwLock}, + time::{Duration, SystemTime}, +}; + +use crate::{ + config::{ + EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, + }, + logger::LdkLogger, + runtime::Runtime, + NodeMetrics, Scorer, +}; +use crate::{write_node_metrics, DynStore, Logger}; +use lightning::{ + log_error, log_info, log_trace, routing::scoring::ChannelLiquidities, util::ser::Readable, +}; + +/// Start a background task that periodically downloads scores via an external url and merges them into the local +/// pathfinding scores. 
+pub fn setup_background_pathfinding_scores_sync( + url: String, scorer: Arc>, node_metrics: Arc>, + kv_store: Arc, logger: Arc, runtime: Arc, + mut stop_receiver: tokio::sync::watch::Receiver<()>, +) { + log_info!(logger, "External scores background syncing enabled from {}", url); + + let logger = Arc::clone(&logger); + + runtime.spawn_background_processor_task(async move { + let mut interval = tokio::time::interval(EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL); + loop { + tokio::select! { + _ = stop_receiver.changed() => { + log_trace!( + logger, + "Stopping background syncing external scores.", + ); + return; + } + _ = interval.tick() => { + log_trace!( + logger, + "Background sync of external scores started.", + ); + + sync_external_scores(logger.as_ref(), scorer.as_ref(), node_metrics.as_ref(), Arc::clone(&kv_store), &url).await; + } + } + } + }); +} + +async fn sync_external_scores( + logger: &Logger, scorer: &Mutex, node_metrics: &RwLock, + kv_store: Arc, url: &String, +) -> () { + let response = tokio::time::timeout( + Duration::from_secs(EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS), + reqwest::get(url), + ) + .await; + + let response = match response { + Ok(resp) => resp, + Err(e) => { + log_error!(logger, "Retrieving external scores timed out: {}", e); + return; + }, + }; + let response = match response { + Ok(resp) => resp, + Err(e) => { + log_error!(logger, "Failed to retrieve external scores update: {}", e); + return; + }, + }; + let body = match response.bytes().await { + Ok(bytes) => bytes, + Err(e) => { + log_error!(logger, "Failed to read external scores update: {}", e); + return; + }, + }; + let mut reader = Cursor::new(body); + match ChannelLiquidities::read(&mut reader) { + Ok(liquidities) => { + let duration_since_epoch = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); + scorer.lock().unwrap().merge(liquidities, duration_since_epoch); + let mut locked_node_metrics = node_metrics.write().unwrap(); + 
locked_node_metrics.latest_pathfinding_scores_sync_timestamp = + Some(duration_since_epoch.as_secs()); + write_node_metrics(&*locked_node_metrics, kv_store, logger).unwrap_or_else(|e| { + log_error!(logger, "Persisting node metrics failed: {}", e); + }); + log_trace!(logger, "External scores merged successfully"); + }, + Err(e) => { + log_error!(logger, "Failed to parse external scores update: {}", e); + }, + } +} diff --git a/src/types.rs b/src/types.rs index 2fc1c6488..800d9462d 100644 --- a/src/types.rs +++ b/src/types.rs @@ -17,7 +17,8 @@ use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::ln::types::ChannelId; use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; -use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; +use lightning::routing::scoring::CombinedScorer; +use lightning::routing::scoring::ProbabilisticScoringFeeParameters; use lightning::sign::InMemorySigner; use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersister}; use lightning::util::ser::{Readable, Writeable, Writer}; @@ -114,7 +115,7 @@ pub(crate) type Router = DefaultRouter< ProbabilisticScoringFeeParameters, Scorer, >; -pub(crate) type Scorer = ProbabilisticScorer, Arc>; +pub(crate) type Scorer = CombinedScorer, Arc>; pub(crate) type Graph = gossip::NetworkGraph>; From 52705d333ca67c36cc9f1473fdda0b23088495d5 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 10 Feb 2025 14:07:00 +0100 Subject: [PATCH 122/184] cache external pathfinding scores Save external pathfinding scores in a cache so that they will be available immediately after a node restart. Otherwise there might be a time window where new scores need to be downloaded still and the node operates on local data only. 
--- src/builder.rs | 19 ++++++++++++++++- src/io/utils.rs | 55 +++++++++++++++++++++++++++++++++++++++++++++++-- src/scoring.rs | 18 +++++++++++++--- 3 files changed, 86 insertions(+), 6 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index af2d92463..f3a57e085 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -24,6 +24,7 @@ use lightning::io::Cursor; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; +use lightning::log_trace; use lightning::routing::gossip::NodeAlias; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ @@ -51,7 +52,9 @@ use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; -use crate::io::utils::{read_node_metrics, write_node_metrics}; +use crate::io::utils::{ + read_external_pathfinding_scores_from_cache, read_node_metrics, write_node_metrics, +}; use crate::io::vss_store::VssStore; use crate::io::{ self, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, @@ -1410,6 +1413,20 @@ fn build_with_store_internal( let scorer = Arc::new(Mutex::new(CombinedScorer::new(local_scorer))); + // Restore external pathfinding scores from cache if possible. 
+ match read_external_pathfinding_scores_from_cache(Arc::clone(&kv_store), Arc::clone(&logger)) { + Ok(external_scores) => { + scorer.lock().unwrap().merge(external_scores, cur_time); + log_trace!(logger, "External scores from cache merged successfully"); + }, + Err(e) => { + if e.kind() != std::io::ErrorKind::NotFound { + log_error!(logger, "Error while reading external scores from cache: {}", e); + return Err(BuildError::ReadFailed); + } + }, + } + let scoring_fee_params = ProbabilisticScoringFeeParameters::default(); let router = Arc::new(DefaultRouter::new( Arc::clone(&network_graph), diff --git a/src/io/utils.rs b/src/io/utils.rs index 1556314c4..6fb672e36 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -22,9 +22,11 @@ use bitcoin::Network; use lightning::io::Cursor; use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; -use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringDecayParameters}; +use lightning::routing::scoring::{ + ChannelLiquidities, ProbabilisticScorer, ProbabilisticScoringDecayParameters, +}; use lightning::util::persist::{ - KVStoreSync, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, + KVStore, KVStoreSync, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, @@ -48,6 +50,8 @@ use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; +pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; + /// Generates a random [BIP 39] mnemonic. 
/// /// The result may be used to initialize the [`Node`] entropy, i.e., can be given to @@ -164,6 +168,53 @@ where }) } +/// Read previously persisted external pathfinding scores from the cache. +pub(crate) fn read_external_pathfinding_scores_from_cache( + kv_store: Arc, logger: L, +) -> Result +where + L::Target: LdkLogger, +{ + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, + )?); + ChannelLiquidities::read(&mut reader).map_err(|e| { + log_error!(logger, "Failed to deserialize scorer: {}", e); + std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer") + }) +} + +/// Persist external pathfinding scores to the cache. +pub(crate) async fn write_external_pathfinding_scores_to_cache( + kv_store: Arc, data: &ChannelLiquidities, logger: L, +) -> Result<(), Error> +where + L::Target: LdkLogger, +{ + KVStore::write( + &*kv_store, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, + data.encode(), + ) + .await + .map_err(|e| { + log_error!( + logger, + "Writing data to key {}/{}/{} failed due to: {}", + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, + e + ); + Error::PersistenceFailed + }) +} + /// Read previously persisted events from the store. 
pub(crate) fn read_event_queue( kv_store: Arc, logger: L, diff --git a/src/scoring.rs b/src/scoring.rs index e244ab258..107f63f65 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -8,6 +8,7 @@ use crate::{ config::{ EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, }, + io::utils::write_external_pathfinding_scores_to_cache, logger::LdkLogger, runtime::Runtime, NodeMetrics, Scorer, @@ -86,15 +87,26 @@ async fn sync_external_scores( let mut reader = Cursor::new(body); match ChannelLiquidities::read(&mut reader) { Ok(liquidities) => { + if let Err(e) = write_external_pathfinding_scores_to_cache( + Arc::clone(&kv_store), + &liquidities, + logger, + ) + .await + { + log_error!(logger, "Failed to persist external scores to cache: {}", e); + } + let duration_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); scorer.lock().unwrap().merge(liquidities, duration_since_epoch); let mut locked_node_metrics = node_metrics.write().unwrap(); locked_node_metrics.latest_pathfinding_scores_sync_timestamp = Some(duration_since_epoch.as_secs()); - write_node_metrics(&*locked_node_metrics, kv_store, logger).unwrap_or_else(|e| { - log_error!(logger, "Persisting node metrics failed: {}", e); - }); + write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), logger) + .unwrap_or_else(|e| { + log_error!(logger, "Persisting node metrics failed: {}", e); + }); log_trace!(logger, "External scores merged successfully"); }, Err(e) => { From b8cf41c1b5d9936230b8c106ca135bf704348ac6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 16 Oct 2025 10:47:20 +0200 Subject: [PATCH 123/184] Drop `Condvar` and use `block_on` for `wait_next_event` Given we regularly run into issues arising from mixing sync and async contexts, we here simplify our `EventQueue` implementation by avoiding to use `Condvar::wait_while` (which parks the current thread) and rather simply use `block_on` on our `next_event_async` method.
--- src/event.rs | 18 +++--------------- src/lib.rs | 5 ++++- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/src/event.rs b/src/event.rs index db6ef13f1..c9881fc22 100644 --- a/src/event.rs +++ b/src/event.rs @@ -9,7 +9,7 @@ use core::future::Future; use core::task::{Poll, Waker}; use std::collections::VecDeque; use std::ops::Deref; -use std::sync::{Arc, Condvar, Mutex}; +use std::sync::{Arc, Mutex}; use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::secp256k1::PublicKey; @@ -287,7 +287,6 @@ where { queue: Arc>>, waker: Arc>>, - notifier: Condvar, kv_store: Arc, logger: L, } @@ -299,8 +298,7 @@ where pub(crate) fn new(kv_store: Arc, logger: L) -> Self { let queue = Arc::new(Mutex::new(VecDeque::new())); let waker = Arc::new(Mutex::new(None)); - let notifier = Condvar::new(); - Self { queue, waker, notifier, kv_store, logger } + Self { queue, waker, kv_store, logger } } pub(crate) fn add_event(&self, event: Event) -> Result<(), Error> { @@ -310,8 +308,6 @@ where self.persist_queue(&locked_queue)?; } - self.notifier.notify_one(); - if let Some(waker) = self.waker.lock().unwrap().take() { waker.wake(); } @@ -327,19 +323,12 @@ where EventFuture { event_queue: Arc::clone(&self.queue), waker: Arc::clone(&self.waker) }.await } - pub(crate) fn wait_next_event(&self) -> Event { - let locked_queue = - self.notifier.wait_while(self.queue.lock().unwrap(), |queue| queue.is_empty()).unwrap(); - locked_queue.front().unwrap().clone() - } - pub(crate) fn event_handled(&self) -> Result<(), Error> { { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.pop_front(); self.persist_queue(&locked_queue)?; } - self.notifier.notify_one(); if let Some(waker) = self.waker.lock().unwrap().take() { waker.wake(); @@ -383,8 +372,7 @@ where let read_queue: EventQueueDeserWrapper = Readable::read(reader)?; let queue = Arc::new(Mutex::new(read_queue.0)); let waker = Arc::new(Mutex::new(None)); - let notifier = Condvar::new(); - Ok(Self { queue, waker, 
notifier, kv_store, logger }) + Ok(Self { queue, waker, kv_store, logger }) } } diff --git a/src/lib.rs b/src/lib.rs index f07b2def3..2ea704d27 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -749,7 +749,10 @@ impl Node { /// **Caution:** Users must handle events as quickly as possible to prevent a large event backlog, /// which can increase the memory footprint of [`Node`]. pub fn wait_next_event(&self) -> Event { - self.event_queue.wait_next_event() + let fut = self.event_queue.next_event_async(); + // We use our runtime for the sync variant to ensure `tokio::task::block_in_place` is + // always called if we'd ever hit this in an outer runtime context. + self.runtime.block_on(fut) } /// Confirm the last retrieved event handled. From 05dab40f64071875d9471574b91366e2625883a9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 16 Oct 2025 11:38:55 +0200 Subject: [PATCH 124/184] Async'ify our test suite .. as LDK Node is moving towards a more `async` core, it starts to make sense to switch our test suite over to be `async`. This change should make our CI more efficient (as not every node will spawn its independent runtime, but we just have one runtime per test created) and also makes sure we won't run into any edge cases arising from blocking test threads that are executing other async tasks. --- src/event.rs | 30 +-- tests/common/mod.rs | 100 +++++----- tests/integration_tests_cln.rs | 15 +- tests/integration_tests_lnd.rs | 9 +- tests/integration_tests_rust.rs | 319 +++++++++++++++++--------------- tests/integration_tests_vss.rs | 7 +- tests/reorg_test.rs | 314 ++++++++++++++++--------------- 7 files changed, 402 insertions(+), 392 deletions(-) diff --git a/src/event.rs b/src/event.rs index c9881fc22..eedfb1c14 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1625,7 +1625,6 @@ mod tests { // Check we get the expected event and that it is returned until we mark it handled. 
for _ in 0..5 { - assert_eq!(event_queue.wait_next_event(), expected_event); assert_eq!(event_queue.next_event_async().await, expected_event); assert_eq!(event_queue.next_event(), Some(expected_event.clone())); } @@ -1640,7 +1639,7 @@ mod tests { .unwrap(); let deser_event_queue = EventQueue::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap(); - assert_eq!(deser_event_queue.wait_next_event(), expected_event); + assert_eq!(deser_event_queue.next_event_async().await, expected_event); event_queue.event_handled().unwrap(); assert_eq!(event_queue.next_event(), None); @@ -1709,32 +1708,5 @@ mod tests { } } assert_eq!(event_queue.next_event(), None); - - // Check we operate correctly, even when mixing and matching blocking and async API calls. - let (tx, mut rx) = tokio::sync::watch::channel(()); - let thread_queue = Arc::clone(&event_queue); - let thread_event = expected_event.clone(); - std::thread::spawn(move || { - let e = thread_queue.wait_next_event(); - assert_eq!(e, thread_event); - thread_queue.event_handled().unwrap(); - tx.send(()).unwrap(); - }); - - let thread_queue = Arc::clone(&event_queue); - let thread_event = expected_event.clone(); - std::thread::spawn(move || { - // Sleep a bit before we enqueue the events everybody is waiting for. - std::thread::sleep(Duration::from_millis(20)); - thread_queue.add_event(thread_event.clone()).unwrap(); - thread_queue.add_event(thread_event.clone()).unwrap(); - }); - - let e = event_queue.next_event_async().await; - assert_eq!(e, expected_event.clone()); - event_queue.event_handled().unwrap(); - - rx.changed().await.unwrap(); - assert_eq!(event_queue.next_event(), None); } } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 4d02895c7..05326b03d 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -49,7 +49,7 @@ use serde_json::{json, Value}; macro_rules! 
expect_event { ($node:expr, $event_type:ident) => {{ - match $node.wait_next_event() { + match $node.next_event_async().await { ref e @ Event::$event_type { .. } => { println!("{} got event {:?}", $node.node_id(), e); $node.event_handled().unwrap(); @@ -65,7 +65,7 @@ pub(crate) use expect_event; macro_rules! expect_channel_pending_event { ($node:expr, $counterparty_node_id:expr) => {{ - match $node.wait_next_event() { + match $node.next_event_async().await { ref e @ Event::ChannelPending { funding_txo, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(counterparty_node_id, $counterparty_node_id); @@ -83,7 +83,7 @@ pub(crate) use expect_channel_pending_event; macro_rules! expect_channel_ready_event { ($node:expr, $counterparty_node_id:expr) => {{ - match $node.wait_next_event() { + match $node.next_event_async().await { ref e @ Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(counterparty_node_id, Some($counterparty_node_id)); @@ -101,7 +101,7 @@ pub(crate) use expect_channel_ready_event; macro_rules! expect_payment_received_event { ($node:expr, $amount_msat:expr) => {{ - match $node.wait_next_event() { + match $node.next_event_async().await { ref e @ Event::PaymentReceived { payment_id, amount_msat, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(amount_msat, $amount_msat); @@ -123,7 +123,7 @@ pub(crate) use expect_payment_received_event; macro_rules! expect_payment_claimable_event { ($node:expr, $payment_id:expr, $payment_hash:expr, $claimable_amount_msat:expr) => {{ - match $node.wait_next_event() { + match $node.next_event_async().await { ref e @ Event::PaymentClaimable { payment_id, payment_hash, @@ -148,7 +148,7 @@ pub(crate) use expect_payment_claimable_event; macro_rules! 
expect_payment_successful_event { ($node:expr, $payment_id:expr, $fee_paid_msat:expr) => {{ - match $node.wait_next_event() { + match $node.next_event_async().await { ref e @ Event::PaymentSuccessful { payment_id, fee_paid_msat, .. } => { println!("{} got event {:?}", $node.node_id(), e); if let Some(fee_msat) = $fee_paid_msat { @@ -389,7 +389,7 @@ pub(crate) fn setup_node_for_async_payments( node } -pub(crate) fn generate_blocks_and_wait( +pub(crate) async fn generate_blocks_and_wait( bitcoind: &BitcoindClient, electrs: &E, num: usize, ) { let _ = bitcoind.create_wallet("ldk_node_test"); @@ -400,7 +400,7 @@ pub(crate) fn generate_blocks_and_wait( let address = bitcoind.new_address().expect("failed to get new address"); // TODO: expect this Result once the WouldBlock issue is resolved upstream. let _block_hashes_res = bitcoind.generate_to_address(num, &address); - wait_for_block(electrs, cur_height as usize + num); + wait_for_block(electrs, cur_height as usize + num).await; print!(" Done!"); println!("\n"); } @@ -420,14 +420,14 @@ pub(crate) fn invalidate_blocks(bitcoind: &BitcoindClient, num_blocks: usize) { assert!(new_cur_height + num_blocks == cur_height); } -pub(crate) fn wait_for_block(electrs: &E, min_height: usize) { +pub(crate) async fn wait_for_block(electrs: &E, min_height: usize) { let mut header = match electrs.block_headers_subscribe() { Ok(header) => header, Err(_) => { // While subscribing should succeed the first time around, we ran into some cases where // it didn't. Since we can't proceed without subscribing, we try again after a delay // and panic if it still fails. 
- std::thread::sleep(Duration::from_secs(3)); + tokio::time::sleep(Duration::from_secs(3)).await; electrs.block_headers_subscribe().expect("failed to subscribe to block headers") }, }; @@ -438,11 +438,12 @@ pub(crate) fn wait_for_block(electrs: &E, min_height: usize) { header = exponential_backoff_poll(|| { electrs.ping().expect("failed to ping electrs"); electrs.block_headers_pop().expect("failed to pop block header") - }); + }) + .await; } } -pub(crate) fn wait_for_tx(electrs: &E, txid: Txid) { +pub(crate) async fn wait_for_tx(electrs: &E, txid: Txid) { if electrs.transaction_get(&txid).is_ok() { return; } @@ -450,10 +451,11 @@ pub(crate) fn wait_for_tx(electrs: &E, txid: Txid) { exponential_backoff_poll(|| { electrs.ping().unwrap(); electrs.transaction_get(&txid).ok() - }); + }) + .await; } -pub(crate) fn wait_for_outpoint_spend(electrs: &E, outpoint: OutPoint) { +pub(crate) async fn wait_for_outpoint_spend(electrs: &E, outpoint: OutPoint) { let tx = electrs.transaction_get(&outpoint.txid).unwrap(); let txout_script = tx.output.get(outpoint.vout as usize).unwrap().clone().script_pubkey; @@ -467,10 +469,11 @@ pub(crate) fn wait_for_outpoint_spend(electrs: &E, outpoint: Out let is_spent = !electrs.script_get_history(&txout_script).unwrap().is_empty(); is_spent.then_some(()) - }); + }) + .await; } -pub(crate) fn exponential_backoff_poll(mut poll: F) -> T +pub(crate) async fn exponential_backoff_poll(mut poll: F) -> T where F: FnMut() -> Option, { @@ -487,26 +490,26 @@ where } assert!(tries < 20, "Reached max tries."); tries += 1; - std::thread::sleep(delay); + tokio::time::sleep(delay).await; } } -pub(crate) fn premine_and_distribute_funds( +pub(crate) async fn premine_and_distribute_funds( bitcoind: &BitcoindClient, electrs: &E, addrs: Vec
, amount: Amount, ) { - premine_blocks(bitcoind, electrs); + premine_blocks(bitcoind, electrs).await; - distribute_funds_unconfirmed(bitcoind, electrs, addrs, amount); - generate_blocks_and_wait(bitcoind, electrs, 1); + distribute_funds_unconfirmed(bitcoind, electrs, addrs, amount).await; + generate_blocks_and_wait(bitcoind, electrs, 1).await; } -pub(crate) fn premine_blocks(bitcoind: &BitcoindClient, electrs: &E) { +pub(crate) async fn premine_blocks(bitcoind: &BitcoindClient, electrs: &E) { let _ = bitcoind.create_wallet("ldk_node_test"); let _ = bitcoind.load_wallet("ldk_node_test"); - generate_blocks_and_wait(bitcoind, electrs, 101); + generate_blocks_and_wait(bitcoind, electrs, 101).await; } -pub(crate) fn distribute_funds_unconfirmed( +pub(crate) async fn distribute_funds_unconfirmed( bitcoind: &BitcoindClient, electrs: &E, addrs: Vec
, amount: Amount, ) -> Txid { let mut amounts = HashMap::::new(); @@ -524,7 +527,7 @@ pub(crate) fn distribute_funds_unconfirmed( .parse() .unwrap(); - wait_for_tx(electrs, txid); + wait_for_tx(electrs, txid).await; txid } @@ -543,7 +546,7 @@ pub(crate) fn prepare_rbf( (tx, fee_output_index) } -pub(crate) fn bump_fee_and_broadcast( +pub(crate) async fn bump_fee_and_broadcast( bitcoind: &BitcoindClient, electrs: &E, mut tx: Transaction, fee_output_index: usize, is_insert_block: bool, ) -> Transaction { @@ -573,10 +576,10 @@ pub(crate) fn bump_fee_and_broadcast( match bitcoind.send_raw_transaction(&tx) { Ok(res) => { if is_insert_block { - generate_blocks_and_wait(bitcoind, electrs, 1); + generate_blocks_and_wait(bitcoind, electrs, 1).await; } let new_txid: Txid = res.0.parse().unwrap(); - wait_for_tx(electrs, new_txid); + wait_for_tx(electrs, new_txid).await; return tx; }, Err(_) => { @@ -591,14 +594,14 @@ pub(crate) fn bump_fee_and_broadcast( panic!("Failed to bump fee after {} attempts", attempts); } -pub fn open_channel( +pub async fn open_channel( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, should_announce: bool, electrsd: &ElectrsD, ) -> OutPoint { - open_channel_push_amt(node_a, node_b, funding_amount_sat, None, should_announce, electrsd) + open_channel_push_amt(node_a, node_b, funding_amount_sat, None, should_announce, electrsd).await } -pub fn open_channel_push_amt( +pub async fn open_channel_push_amt( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, push_amount_msat: Option, should_announce: bool, electrsd: &ElectrsD, ) -> OutPoint { @@ -628,12 +631,12 @@ pub fn open_channel_push_amt( let funding_txo_a = expect_channel_pending_event!(node_a, node_b.node_id()); let funding_txo_b = expect_channel_pending_event!(node_b, node_a.node_id()); assert_eq!(funding_txo_a, funding_txo_b); - wait_for_tx(&electrsd.client, funding_txo_a.txid); + wait_for_tx(&electrsd.client, funding_txo_a.txid).await; funding_txo_a } -pub(crate) fn 
do_channel_full_cycle( +pub(crate) async fn do_channel_full_cycle( node_a: TestNode, node_b: TestNode, bitcoind: &BitcoindClient, electrsd: &E, allow_0conf: bool, expect_anchor_channel: bool, force_close: bool, ) { @@ -647,7 +650,8 @@ pub(crate) fn do_channel_full_cycle( electrsd, vec![addr_a, addr_b], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); @@ -706,10 +710,10 @@ pub(crate) fn do_channel_full_cycle( let funding_txo_b = expect_channel_pending_event!(node_b, node_a.node_id()); assert_eq!(funding_txo_a, funding_txo_b); - wait_for_tx(electrsd, funding_txo_a.txid); + wait_for_tx(electrsd, funding_txo_a.txid).await; if !allow_0conf { - generate_blocks_and_wait(&bitcoind, electrsd, 6); + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; } node_a.sync_wallets().unwrap(); @@ -839,7 +843,7 @@ pub(crate) fn do_channel_full_cycle( let payment_id = node_a.bolt11_payment().send_using_amount(&invoice, overpaid_amount_msat, None).unwrap(); expect_event!(node_a, PaymentSuccessful); - let received_amount = match node_b.wait_next_event() { + let received_amount = match node_b.next_event_async().await { ref e @ Event::PaymentReceived { amount_msat, .. } => { println!("{} got event {:?}", std::stringify!(node_b), e); node_b.event_handled().unwrap(); @@ -877,7 +881,7 @@ pub(crate) fn do_channel_full_cycle( .unwrap(); expect_event!(node_a, PaymentSuccessful); - let received_amount = match node_b.wait_next_event() { + let received_amount = match node_b.next_event_async().await { ref e @ Event::PaymentReceived { amount_msat, .. 
} => { println!("{} got event {:?}", std::stringify!(node_b), e); node_b.event_handled().unwrap(); @@ -999,7 +1003,7 @@ pub(crate) fn do_channel_full_cycle( .send_with_custom_tlvs(keysend_amount_msat, node_b.node_id(), None, custom_tlvs.clone()) .unwrap(); expect_event!(node_a, PaymentSuccessful); - let next_event = node_b.wait_next_event(); + let next_event = node_b.next_event_async().await; let (received_keysend_amount, received_custom_records) = match next_event { ref e @ Event::PaymentReceived { amount_msat, ref custom_records, .. } => { println!("{} got event {:?}", std::stringify!(node_b), e); @@ -1049,7 +1053,7 @@ pub(crate) fn do_channel_full_cycle( println!("\nB close_channel (force: {})", force_close); if force_close { - std::thread::sleep(Duration::from_secs(1)); + tokio::time::sleep(Duration::from_secs(1)).await; node_a.force_close_channel(&user_channel_id, node_b.node_id(), None).unwrap(); } else { node_a.close_channel(&user_channel_id, node_b.node_id()).unwrap(); @@ -1058,9 +1062,9 @@ pub(crate) fn do_channel_full_cycle( expect_event!(node_a, ChannelClosed); expect_event!(node_b, ChannelClosed); - wait_for_outpoint_spend(electrsd, funding_txo_b); + wait_for_outpoint_spend(electrsd, funding_txo_b).await; - generate_blocks_and_wait(&bitcoind, electrsd, 1); + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1076,7 +1080,7 @@ pub(crate) fn do_channel_full_cycle( assert_eq!(counterparty_node_id, node_a.node_id()); let cur_height = node_b.status().current_best_block.height; let blocks_to_go = confirmation_height - cur_height; - generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize); + generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize).await; node_b.sync_wallets().unwrap(); node_a.sync_wallets().unwrap(); }, @@ -1089,7 +1093,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::BroadcastAwaitingConfirmation { .. 
} => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 1); + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; node_b.sync_wallets().unwrap(); node_a.sync_wallets().unwrap(); @@ -1099,7 +1103,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::AwaitingThresholdConfirmations { .. } => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 5); + generate_blocks_and_wait(&bitcoind, electrsd, 5).await; node_b.sync_wallets().unwrap(); node_a.sync_wallets().unwrap(); @@ -1117,7 +1121,7 @@ pub(crate) fn do_channel_full_cycle( assert_eq!(counterparty_node_id, node_b.node_id()); let cur_height = node_a.status().current_best_block.height; let blocks_to_go = confirmation_height - cur_height; - generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize); + generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); }, @@ -1130,7 +1134,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::BroadcastAwaitingConfirmation { .. } => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 1); + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1140,7 +1144,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::AwaitingThresholdConfirmations { .. 
} => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 5); + generate_blocks_and_wait(&bitcoind, electrsd, 5).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); } diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 6fc72b2c2..38e345f15 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -25,8 +25,8 @@ use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; -#[test] -fn test_cln() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_cln() { // Setup bitcoind / electrs clients let bitcoind_client = BitcoindClient::new_with_auth( "http://127.0.0.1:18443", @@ -36,7 +36,7 @@ fn test_cln() { let electrs_client = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap(); // Give electrs a kick. - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1); + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1).await; // Setup LDK Node let config = common::random_config(true); @@ -54,7 +54,8 @@ fn test_cln() { &electrs_client, vec![address], premine_amount, - ); + ) + .await; // Setup CLN let sock = "/tmp/lightning-rpc"; @@ -67,7 +68,7 @@ fn test_cln() { if info.blockheight > 0 { break info; } - std::thread::sleep(std::time::Duration::from_millis(250)); + tokio::time::sleep(std::time::Duration::from_millis(250)).await; } }; let cln_node_id = PublicKey::from_str(&cln_info.id).unwrap(); @@ -92,8 +93,8 @@ fn test_cln() { .unwrap(); let funding_txo = common::expect_channel_pending_event!(node, cln_node_id); - common::wait_for_tx(&electrs_client, funding_txo.txid); - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6); + common::wait_for_tx(&electrs_client, funding_txo.txid).await; + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6).await; node.sync_wallets().unwrap(); let 
user_channel_id = common::expect_channel_ready_event!(node, cln_node_id); diff --git a/tests/integration_tests_lnd.rs b/tests/integration_tests_lnd.rs index 7dfc1e4f9..311a11c3c 100755 --- a/tests/integration_tests_lnd.rs +++ b/tests/integration_tests_lnd.rs @@ -34,7 +34,7 @@ async fn test_lnd() { let electrs_client = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap(); // Give electrs a kick. - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1); + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1).await; // Setup LDK Node let config = common::random_config(true); @@ -52,7 +52,8 @@ async fn test_lnd() { &electrs_client, vec![address], premine_amount, - ); + ) + .await; // Setup LND let endpoint = "127.0.0.1:8081"; @@ -73,8 +74,8 @@ async fn test_lnd() { .unwrap(); let funding_txo = common::expect_channel_pending_event!(node, lnd_node_id); - common::wait_for_tx(&electrs_client, funding_txo.txid); - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6); + common::wait_for_tx(&electrs_client, funding_txo.txid).await; + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6).await; node.sync_wallets().unwrap(); let user_channel_id = common::expect_channel_ready_event!(node, lnd_node_id); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 804bba876..e2d4207cd 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -39,72 +39,80 @@ use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; use log::LevelFilter; -#[test] -fn channel_full_cycle() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, 
&bitcoind.client, &electrsd.client, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; } -#[test] -fn channel_full_cycle_electrum() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_electrum() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Electrum(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; } -#[test] -fn channel_full_cycle_bitcoind_rpc_sync() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_bitcoind_rpc_sync() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::BitcoindRpcSync(&bitcoind); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; } -#[test] -fn channel_full_cycle_bitcoind_rest_sync() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_bitcoind_rest_sync() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::BitcoindRestSync(&bitcoind); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; } -#[test] -fn channel_full_cycle_force_close() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_force_close() 
{ let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) + .await; } -#[test] -fn channel_full_cycle_force_close_trusted_no_reserve() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, true); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) + .await; } -#[test] -fn channel_full_cycle_0conf() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) + .await; } -#[test] -fn channel_full_cycle_legacy_staticremotekey() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false) + .await; } -#[test] -fn 
channel_open_fails_when_funds_insufficient() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_open_fails_when_funds_insufficient() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -119,7 +127,8 @@ fn channel_open_fails_when_funds_insufficient() { &electrsd.client, vec![addr_a, addr_b], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); @@ -138,8 +147,8 @@ fn channel_open_fails_when_funds_insufficient() { ); } -#[test] -fn multi_hop_sending() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn multi_hop_sending() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); @@ -162,7 +171,8 @@ fn multi_hop_sending() { &electrsd.client, addresses, Amount::from_sat(premine_amount_sat), - ); + ) + .await; for n in &nodes { n.sync_wallets().unwrap(); @@ -177,18 +187,18 @@ fn multi_hop_sending() { // \ / // (1M:0)- N3 -(1M:0) - open_channel(&nodes[0], &nodes[1], 100_000, true, &electrsd); - open_channel(&nodes[1], &nodes[2], 1_000_000, true, &electrsd); + open_channel(&nodes[0], &nodes[1], 100_000, true, &electrsd).await; + open_channel(&nodes[1], &nodes[2], 1_000_000, true, &electrsd).await; // We need to sync wallets in-between back-to-back channel opens from the same node so BDK // wallet picks up on the broadcast funding tx and doesn't double-spend itself. // // TODO: Remove once fixed in BDK. 
nodes[1].sync_wallets().unwrap(); - open_channel(&nodes[1], &nodes[3], 1_000_000, true, &electrsd); - open_channel(&nodes[2], &nodes[4], 1_000_000, true, &electrsd); - open_channel(&nodes[3], &nodes[4], 1_000_000, true, &electrsd); + open_channel(&nodes[1], &nodes[3], 1_000_000, true, &electrsd).await; + open_channel(&nodes[2], &nodes[4], 1_000_000, true, &electrsd).await; + open_channel(&nodes[3], &nodes[4], 1_000_000, true, &electrsd).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; for n in &nodes { n.sync_wallets().unwrap(); @@ -206,7 +216,7 @@ fn multi_hop_sending() { expect_event!(nodes[4], ChannelReady); // Sleep a bit for gossip to propagate. - std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let route_params = RouteParametersConfig { max_total_routing_fee_msat: Some(75_000), @@ -235,8 +245,8 @@ fn multi_hop_sending() { expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); } -#[test] -fn start_stop_reinit() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn start_stop_reinit() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let config = random_config(true); @@ -265,7 +275,8 @@ fn start_stop_reinit() { &electrsd.client, vec![funding_address], expected_amount, - ); + ) + .await; node.sync_wallets().unwrap(); assert_eq!(node.list_balances().spendable_onchain_balance_sats, expected_amount.to_sat()); @@ -304,8 +315,8 @@ fn start_stop_reinit() { reinitialized_node.stop().unwrap(); } -#[test] -fn onchain_send_receive() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn onchain_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -323,7 +334,8 @@ fn 
onchain_send_receive() { &electrsd.client, vec![addr_a.clone(), addr_b.clone()], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -350,8 +362,8 @@ fn onchain_send_receive() { let channel_amount_sat = 1_000_000; let reserve_amount_sat = 25_000; - open_channel(&node_b, &node_a, channel_amount_sat, true, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_b, &node_a, channel_amount_sat, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -393,7 +405,7 @@ fn onchain_send_receive() { let amount_to_send_sats = 54321; let txid = node_b.onchain_payment().send_to_address(&addr_a, amount_to_send_sats, None).unwrap(); - wait_for_tx(&electrsd.client, txid); + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -420,7 +432,7 @@ fn onchain_send_receive() { assert_eq!(payment_a.amount_msat, payment_b.amount_msat); assert_eq!(payment_a.fee_paid_msat, payment_b.fee_paid_msat); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -458,8 +470,8 @@ fn onchain_send_receive() { let addr_b = node_b.onchain_payment().new_address().unwrap(); let txid = node_a.onchain_payment().send_all_to_address(&addr_b, true, None).unwrap(); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -481,8 +493,8 @@ fn onchain_send_receive() { let addr_b = node_b.onchain_payment().new_address().unwrap(); let txid = 
node_a.onchain_payment().send_all_to_address(&addr_b, false, None).unwrap(); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -504,8 +516,8 @@ fn onchain_send_receive() { assert_eq!(node_b_payments.len(), 5); } -#[test] -fn onchain_send_all_retains_reserve() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn onchain_send_all_retains_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -522,7 +534,8 @@ fn onchain_send_all_retains_reserve() { &electrsd.client, vec![addr_a.clone(), addr_b.clone()], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -532,8 +545,8 @@ fn onchain_send_all_retains_reserve() { // Send all over, with 0 reserve as we don't have any channels open. 
let txid = node_a.onchain_payment().send_all_to_address(&addr_b, true, None).unwrap(); - wait_for_tx(&electrsd.client, txid); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -550,15 +563,15 @@ fn onchain_send_all_retains_reserve() { .0 .parse() .unwrap(); - wait_for_tx(&electrsd.client, txid); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, reserve_amount_sat); // Open a channel. - open_channel(&node_b, &node_a, premine_amount_sat, false, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_b, &node_a, premine_amount_sat, false, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); expect_channel_ready_event!(node_a, node_b.node_id()); @@ -573,8 +586,8 @@ fn onchain_send_all_retains_reserve() { // Send all over again, this time ensuring the reserve is accounted for let txid = node_b.onchain_payment().send_all_to_address(&addr_a, true, None).unwrap(); - wait_for_tx(&electrsd.client, txid); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -587,8 +600,8 @@ fn onchain_send_all_retains_reserve() { .contains(&node_a.list_balances().spendable_onchain_balance_sats)); } -#[test] -fn onchain_wallet_recovery() { +#[tokio::test(flavor = "multi_thread", worker_threads = 
1)] +async fn onchain_wallet_recovery() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -607,7 +620,8 @@ fn onchain_wallet_recovery() { &electrsd.client, vec![addr_1], Amount::from_sat(premine_amount_sat), - ); + ) + .await; original_node.sync_wallets().unwrap(); assert_eq!(original_node.list_balances().spendable_onchain_balance_sats, premine_amount_sat); @@ -620,9 +634,9 @@ fn onchain_wallet_recovery() { .0 .parse() .unwrap(); - wait_for_tx(&electrsd.client, txid); + wait_for_tx(&electrsd.client, txid).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; original_node.sync_wallets().unwrap(); assert_eq!( @@ -656,9 +670,9 @@ fn onchain_wallet_recovery() { .0 .parse() .unwrap(); - wait_for_tx(&electrsd.client, txid); + wait_for_tx(&electrsd.client, txid).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; recovered_node.sync_wallets().unwrap(); assert_eq!( @@ -667,20 +681,20 @@ fn onchain_wallet_recovery() { ); } -#[test] -fn test_rbf_via_mempool() { - run_rbf_test(false); +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_rbf_via_mempool() { + run_rbf_test(false).await; } -#[test] -fn test_rbf_via_direct_block_insertion() { - run_rbf_test(true); +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_rbf_via_direct_block_insertion() { + run_rbf_test(true).await; } // `is_insert_block`: // - `true`: transaction is mined immediately (no mempool), testing confirmed-Tx handling. // - `false`: transaction stays in mempool until confirmation, testing unconfirmed-Tx handling. 
-fn run_rbf_test(is_insert_block: bool) { +async fn run_rbf_test(is_insert_block: bool) { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind); let chain_source_electrsd = TestChainSource::Electrum(&electrsd); @@ -701,7 +715,7 @@ fn run_rbf_test(is_insert_block: bool) { ]; let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client); - premine_blocks(bitcoind, electrs); + premine_blocks(bitcoind, electrs).await; // Helpers declaration before starting the test let all_addrs = @@ -715,7 +729,8 @@ fn run_rbf_test(is_insert_block: bool) { electrs, all_addrs.clone(), Amount::from_sat(amount_sat), - ); + ) + .await; }; } macro_rules! validate_balances { @@ -745,14 +760,14 @@ fn run_rbf_test(is_insert_block: bool) { output.script_pubkey = new_addr.script_pubkey(); } }); - bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await; validate_balances!(0, is_insert_block); // Not modifying the output scripts, but still bumping the fee. 
distribute_funds_all_nodes!(); validate_balances!(amount_sat, false); (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf); - bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await; validate_balances!(amount_sat, is_insert_block); let mut final_amount_sat = amount_sat * 2; @@ -766,7 +781,7 @@ fn run_rbf_test(is_insert_block: bool) { output.value = Amount::from_sat(output.value.to_sat() + value_sat); } }); - bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await; final_amount_sat += value_sat; validate_balances!(final_amount_sat, is_insert_block); @@ -779,12 +794,12 @@ fn run_rbf_test(is_insert_block: bool) { output.value = Amount::from_sat(output.value.to_sat() - value_sat); } }); - bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await; final_amount_sat -= value_sat; validate_balances!(final_amount_sat, is_insert_block); if !is_insert_block { - generate_blocks_and_wait(bitcoind, electrs, 1); + generate_blocks_and_wait(bitcoind, electrs, 1).await; validate_balances!(final_amount_sat, true); } @@ -795,15 +810,15 @@ fn run_rbf_test(is_insert_block: bool) { let txid = node.onchain_payment().send_all_to_address(&addr, true, None).unwrap(); txids.push(txid); }); - txids.iter().for_each(|txid| { - wait_for_tx(electrs, *txid); - }); - generate_blocks_and_wait(bitcoind, electrs, 6); + for txid in txids { + wait_for_tx(electrs, txid).await; + } + generate_blocks_and_wait(bitcoind, electrs, 6).await; validate_balances!(0, true); } -#[test] -fn sign_verify_msg() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn sign_verify_msg() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let config 
= random_config(true); let chain_source = TestChainSource::Esplora(&electrsd); @@ -816,8 +831,8 @@ fn sign_verify_msg() { assert!(node.verify_signature(msg, sig.as_str(), &pkey)); } -#[test] -fn connection_multi_listen() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn connection_multi_listen() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); @@ -831,13 +846,13 @@ fn connection_multi_listen() { } } -#[test] -fn connection_restart_behavior() { - do_connection_restart_behavior(true); - do_connection_restart_behavior(false); +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn connection_restart_behavior() { + do_connection_restart_behavior(true).await; + do_connection_restart_behavior(false).await; } -fn do_connection_restart_behavior(persist: bool) { +async fn do_connection_restart_behavior(persist: bool) { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); @@ -865,7 +880,7 @@ fn do_connection_restart_behavior(persist: bool) { node_a.start().unwrap(); // Sleep a bit to allow for the reconnect to happen. 
- std::thread::sleep(std::time::Duration::from_secs(5)); + tokio::time::sleep(std::time::Duration::from_secs(5)).await; if persist { let peer_details_a = node_a.list_peers().first().unwrap().clone(); @@ -883,8 +898,8 @@ fn do_connection_restart_behavior(persist: bool) { } } -#[test] -fn concurrent_connections_succeed() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn concurrent_connections_succeed() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -910,8 +925,8 @@ fn concurrent_connections_succeed() { } } -#[test] -fn simple_bolt12_send_receive() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -923,12 +938,13 @@ fn simple_bolt12_send_receive() { &electrsd.client, vec![address_a], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -938,11 +954,11 @@ fn simple_bolt12_send_receive() { // Sleep until we broadcasted a node announcement. while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); + tokio::time::sleep(std::time::Duration::from_millis(10)).await; } // Sleep one more sec to make sure the node announcement propagates. 
- std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let expected_amount_msat = 100_000_000; let offer = @@ -1131,8 +1147,8 @@ fn simple_bolt12_send_receive() { assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } -#[test] -fn async_payment() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn async_payment() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1186,15 +1202,16 @@ fn async_payment() { &electrsd.client, vec![address_sender, address_sender_lsp, address_receiver_lsp, address_receiver], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_sender.sync_wallets().unwrap(); node_sender_lsp.sync_wallets().unwrap(); node_receiver_lsp.sync_wallets().unwrap(); node_receiver.sync_wallets().unwrap(); - open_channel(&node_sender, &node_sender_lsp, 400_000, false, &electrsd); - open_channel(&node_sender_lsp, &node_receiver_lsp, 400_000, true, &electrsd); + open_channel(&node_sender, &node_sender_lsp, 400_000, false, &electrsd).await; + open_channel(&node_sender_lsp, &node_receiver_lsp, 400_000, true, &electrsd).await; open_channel_push_amt( &node_receiver, &node_receiver_lsp, @@ -1202,9 +1219,10 @@ fn async_payment() { Some(200_000_000), false, &electrsd, - ); + ) + .await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_sender.sync_wallets().unwrap(); node_sender_lsp.sync_wallets().unwrap(); @@ -1238,7 +1256,7 @@ fn async_payment() { || !has_node_announcements(&node_receiver_lsp) || !has_node_announcements(&node_receiver) { - std::thread::sleep(std::time::Duration::from_millis(100)); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; } let recipient_id = vec![1, 2, 3]; @@ -1251,7 +1269,7 @@ fn async_payment() { break offer; } - 
std::thread::sleep(std::time::Duration::from_millis(100)); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; }; node_receiver.stop().unwrap(); @@ -1260,15 +1278,15 @@ fn async_payment() { node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None).unwrap(); // Sleep to allow the payment reach a state where the htlc is held and waiting for the receiver to come online. - std::thread::sleep(std::time::Duration::from_millis(3000)); + tokio::time::sleep(std::time::Duration::from_millis(3000)).await; node_receiver.start().unwrap(); expect_payment_successful_event!(node_sender, Some(payment_id), None); } -#[test] -fn test_node_announcement_propagation() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_node_announcement_propagation() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1306,14 +1324,15 @@ fn test_node_announcement_propagation() { &electrsd.client, vec![address_a], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); // Open an announced channel from node_a to node_b - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1323,11 +1342,11 @@ fn test_node_announcement_propagation() { // Wait until node_b broadcasts a node announcement while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); + tokio::time::sleep(std::time::Duration::from_millis(10)).await; } // Sleep to make sure the node announcement propagates - std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; // Get node 
info from the other node's perspective let node_a_info = node_b.network_graph().node(&NodeId::from_pubkey(&node_a.node_id())).unwrap(); @@ -1358,8 +1377,8 @@ fn test_node_announcement_propagation() { assert_eq!(node_b_announcement_info.addresses, node_b_listening_addresses); } -#[test] -fn generate_bip21_uri() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn generate_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1388,11 +1407,12 @@ fn generate_bip21_uri() { &electrsd.client, vec![address_a], Amount::from_sat(premined_sats), - ); + ) + .await; node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1412,8 +1432,8 @@ fn generate_bip21_uri() { assert!(uqr_payment.contains("lno=")); } -#[test] -fn unified_qr_send_receive() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn unified_qr_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1427,11 +1447,12 @@ fn unified_qr_send_receive() { &electrsd.client, vec![address_a], Amount::from_sat(premined_sats), - ); + ) + .await; node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1441,11 +1462,11 @@ fn unified_qr_send_receive() { // Sleep until we broadcast a node announcement. 
while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); + tokio::time::sleep(std::time::Duration::from_millis(10)).await; } // Sleep one more sec to make sure the node announcement propagates. - std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let expected_amount_sats = 100_000; let expiry_sec = 4_000; @@ -1512,8 +1533,8 @@ fn unified_qr_send_receive() { }, }; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1522,8 +1543,8 @@ fn unified_qr_send_receive() { assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); } -#[test] -fn lsps2_client_service_integration() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn lsps2_client_service_integration() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); @@ -1579,16 +1600,17 @@ fn lsps2_client_service_integration() { &electrsd.client, vec![service_addr, client_addr, payer_addr], Amount::from_sat(premine_amount_sat), - ); + ) + .await; service_node.sync_wallets().unwrap(); client_node.sync_wallets().unwrap(); payer_node.sync_wallets().unwrap(); // Open a channel payer -> service that will allow paying the JIT invoice println!("Opening channel payer_node -> service_node!"); - open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd); + open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; service_node.sync_wallets().unwrap(); 
payer_node.sync_wallets().unwrap(); expect_channel_ready_event!(payer_node, service_node.node_id()); @@ -1743,8 +1765,8 @@ fn lsps2_client_service_integration() { assert_eq!(client_node.payment(&payment_id).unwrap().status, PaymentStatus::Failed); } -#[test] -fn facade_logging() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn facade_logging() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1761,8 +1783,8 @@ fn facade_logging() { } } -#[test] -fn spontaneous_send_with_custom_preimage() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn spontaneous_send_with_custom_preimage() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -1774,11 +1796,12 @@ fn spontaneous_send_with_custom_preimage() { &electrsd.client, vec![address_a], Amount::from_sat(premine_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 500_000, true, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_a, &node_b, 500_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); expect_channel_ready_event!(node_a, node_b.node_id()); diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index bdd876003..93f167dae 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -13,8 +13,8 @@ use std::collections::HashMap; use ldk_node::Builder; -#[test] -fn channel_full_cycle_with_vss_store() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_with_vss_store() { let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); println!("== Node A 
=="); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); @@ -52,5 +52,6 @@ fn channel_full_cycle_with_vss_store() { false, true, false, - ); + ) + .await; } diff --git a/tests/reorg_test.rs b/tests/reorg_test.rs index 03ace908f..491a37fd4 100644 --- a/tests/reorg_test.rs +++ b/tests/reorg_test.rs @@ -17,179 +17,187 @@ proptest! { #![proptest_config(proptest::test_runner::Config::with_cases(5))] #[test] fn reorg_test(reorg_depth in 1..=6usize, force_close in prop::bool::ANY) { - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - - let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind); - let chain_source_electrsd = TestChainSource::Electrum(&electrsd); - let chain_source_esplora = TestChainSource::Esplora(&electrsd); - - macro_rules! config_node { - ($chain_source: expr, $anchor_channels: expr) => {{ - let config_a = random_config($anchor_channels); - let node = setup_node(&$chain_source, config_a, None); - node - }}; - } - let anchor_channels = true; - let nodes = vec![ - config_node!(chain_source_electrsd, anchor_channels), - config_node!(chain_source_bitcoind, anchor_channels), - config_node!(chain_source_esplora, anchor_channels), - ]; - - let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client); - macro_rules! reorg { - ($reorg_depth: expr) => {{ - invalidate_blocks(bitcoind, $reorg_depth); - generate_blocks_and_wait(bitcoind, electrs, $reorg_depth); - }}; - } - - let amount_sat = 2_100_000; - let addr_nodes = - nodes.iter().map(|node| node.onchain_payment().new_address().unwrap()).collect::>(); - premine_and_distribute_funds(bitcoind, electrs, addr_nodes, Amount::from_sat(amount_sat)); - - macro_rules! 
sync_wallets { - () => { - nodes.iter().for_each(|node| node.sync_wallets().unwrap()) - }; - } - sync_wallets!(); - nodes.iter().for_each(|node| { - assert_eq!(node.list_balances().spendable_onchain_balance_sats, amount_sat); - assert_eq!(node.list_balances().total_onchain_balance_sats, amount_sat); - }); - - - let mut nodes_funding_tx = HashMap::new(); - let funding_amount_sat = 2_000_000; - for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) { - let funding_txo = open_channel(node, next_node, funding_amount_sat, true, &electrsd); - nodes_funding_tx.insert(node.node_id(), funding_txo); - } - - generate_blocks_and_wait(bitcoind, electrs, 6); - sync_wallets!(); - - reorg!(reorg_depth); - sync_wallets!(); - - macro_rules! collect_channel_ready_events { - ($node:expr, $expected:expr) => {{ - let mut user_channels = HashMap::new(); - for _ in 0..$expected { - match $node.wait_next_event() { - Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => { - $node.event_handled().unwrap(); - user_channels.insert(counterparty_node_id, user_channel_id); - }, - other => panic!("Unexpected event: {:?}", other), + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + rt.block_on(async { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + + let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind); + let chain_source_electrsd = TestChainSource::Electrum(&electrsd); + let chain_source_esplora = TestChainSource::Esplora(&electrsd); + + macro_rules! 
config_node { + ($chain_source: expr, $anchor_channels: expr) => {{ + let config_a = random_config($anchor_channels); + let node = setup_node(&$chain_source, config_a, None); + node + }}; + } + let anchor_channels = true; + let nodes = vec![ + config_node!(chain_source_electrsd, anchor_channels), + config_node!(chain_source_bitcoind, anchor_channels), + config_node!(chain_source_esplora, anchor_channels), + ]; + + let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client); + macro_rules! reorg { + ($reorg_depth: expr) => {{ + invalidate_blocks(bitcoind, $reorg_depth); + generate_blocks_and_wait(bitcoind, electrs, $reorg_depth).await; + }}; + } + + let amount_sat = 2_100_000; + let addr_nodes = + nodes.iter().map(|node| node.onchain_payment().new_address().unwrap()).collect::>(); + premine_and_distribute_funds(bitcoind, electrs, addr_nodes, Amount::from_sat(amount_sat)).await; + + macro_rules! sync_wallets { + () => { + for node in &nodes { + node.sync_wallets().unwrap(); } - } - user_channels - }}; - } + }; + } + sync_wallets!(); + nodes.iter().for_each(|node| { + assert_eq!(node.list_balances().spendable_onchain_balance_sats, amount_sat); + assert_eq!(node.list_balances().total_onchain_balance_sats, amount_sat); + }); - let mut node_channels_id = HashMap::new(); - for (i, node) in nodes.iter().enumerate() { - assert_eq!( - node - .list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound - && matches!(p.kind, PaymentKind::Onchain { .. 
})) - .len(), - 1 - ); - let user_channels = collect_channel_ready_events!(node, 2); - let next_node = nodes.get((i + 1) % nodes.len()).unwrap(); - let prev_node = nodes.get((i + nodes.len() - 1) % nodes.len()).unwrap(); + let mut nodes_funding_tx = HashMap::new(); + let funding_amount_sat = 2_000_000; + for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) { + let funding_txo = open_channel(node, next_node, funding_amount_sat, true, &electrsd).await; + nodes_funding_tx.insert(node.node_id(), funding_txo); + } - assert!(user_channels.get(&Some(next_node.node_id())) != None); - assert!(user_channels.get(&Some(prev_node.node_id())) != None); + generate_blocks_and_wait(bitcoind, electrs, 6).await; + sync_wallets!(); + + reorg!(reorg_depth); + sync_wallets!(); + + macro_rules! collect_channel_ready_events { + ($node:expr, $expected:expr) => {{ + let mut user_channels = HashMap::new(); + for _ in 0..$expected { + match $node.next_event_async().await { + Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => { + $node.event_handled().unwrap(); + user_channels.insert(counterparty_node_id, user_channel_id); + }, + other => panic!("Unexpected event: {:?}", other), + } + } + user_channels + }}; + } - let user_channel_id = - user_channels.get(&Some(next_node.node_id())).expect("Missing user channel for node"); - node_channels_id.insert(node.node_id(), *user_channel_id); - } + let mut node_channels_id = HashMap::new(); + for (i, node) in nodes.iter().enumerate() { + assert_eq!( + node + .list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound + && matches!(p.kind, PaymentKind::Onchain { .. 
})) + .len(), + 1 + ); + + let user_channels = collect_channel_ready_events!(node, 2); + let next_node = nodes.get((i + 1) % nodes.len()).unwrap(); + let prev_node = nodes.get((i + nodes.len() - 1) % nodes.len()).unwrap(); + + assert!(user_channels.get(&Some(next_node.node_id())) != None); + assert!(user_channels.get(&Some(prev_node.node_id())) != None); + + let user_channel_id = + user_channels.get(&Some(next_node.node_id())).expect("Missing user channel for node"); + node_channels_id.insert(node.node_id(), *user_channel_id); + } - for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) { - let user_channel_id = node_channels_id.get(&node.node_id()).expect("user channel id not exist"); - let funding = nodes_funding_tx.get(&node.node_id()).expect("Funding tx not exist"); + for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) { + let user_channel_id = node_channels_id.get(&node.node_id()).expect("user channel id not exist"); + let funding = nodes_funding_tx.get(&node.node_id()).expect("Funding tx not exist"); - if force_close { - node.force_close_channel(&user_channel_id, next_node.node_id(), None).unwrap(); - } else { - node.close_channel(&user_channel_id, next_node.node_id()).unwrap(); - } + if force_close { + node.force_close_channel(&user_channel_id, next_node.node_id(), None).unwrap(); + } else { + node.close_channel(&user_channel_id, next_node.node_id()).unwrap(); + } - expect_event!(node, ChannelClosed); - expect_event!(next_node, ChannelClosed); + expect_event!(node, ChannelClosed); + expect_event!(next_node, ChannelClosed); - wait_for_outpoint_spend(electrs, *funding); - } + wait_for_outpoint_spend(electrs, *funding).await; + } - reorg!(reorg_depth); - sync_wallets!(); + reorg!(reorg_depth); + sync_wallets!(); - generate_blocks_and_wait(bitcoind, electrs, 1); - sync_wallets!(); + generate_blocks_and_wait(bitcoind, electrs, 1).await; + sync_wallets!(); - if force_close { - nodes.iter().for_each(|node| { - 
node.sync_wallets().unwrap(); - // If there is no more balance, there is nothing to process here. - if node.list_balances().lightning_balances.len() < 1 { - return; - } - match node.list_balances().lightning_balances[0] { - LightningBalance::ClaimableAwaitingConfirmations { - confirmation_height, - .. - } => { - let cur_height = node.status().current_best_block.height; - let blocks_to_go = confirmation_height - cur_height; - generate_blocks_and_wait(bitcoind, electrs, blocks_to_go as usize); - node.sync_wallets().unwrap(); - }, - _ => panic!("Unexpected balance state for node_hub!"), - } + if force_close { + for node in &nodes { + node.sync_wallets().unwrap(); + // If there is no more balance, there is nothing to process here. + if node.list_balances().lightning_balances.len() < 1 { + return; + } + match node.list_balances().lightning_balances[0] { + LightningBalance::ClaimableAwaitingConfirmations { + confirmation_height, + .. + } => { + let cur_height = node.status().current_best_block.height; + let blocks_to_go = confirmation_height - cur_height; + generate_blocks_and_wait(bitcoind, electrs, blocks_to_go as usize).await; + node.sync_wallets().unwrap(); + }, + _ => panic!("Unexpected balance state for node_hub!"), + } - assert!(node.list_balances().lightning_balances.len() < 2); - assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0); - match node.list_balances().pending_balances_from_channel_closures[0] { - PendingSweepBalance::BroadcastAwaitingConfirmation { .. } => {}, - _ => panic!("Unexpected balance state!"), - } + assert!(node.list_balances().lightning_balances.len() < 2); + assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0); + match node.list_balances().pending_balances_from_channel_closures[0] { + PendingSweepBalance::BroadcastAwaitingConfirmation { .. 
} => {}, + _ => panic!("Unexpected balance state!"), + } - generate_blocks_and_wait(&bitcoind, electrs, 1); - node.sync_wallets().unwrap(); - assert!(node.list_balances().lightning_balances.len() < 2); - assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0); - match node.list_balances().pending_balances_from_channel_closures[0] { - PendingSweepBalance::AwaitingThresholdConfirmations { .. } => {}, - _ => panic!("Unexpected balance state!"), + generate_blocks_and_wait(&bitcoind, electrs, 1).await; + node.sync_wallets().unwrap(); + assert!(node.list_balances().lightning_balances.len() < 2); + assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0); + match node.list_balances().pending_balances_from_channel_closures[0] { + PendingSweepBalance::AwaitingThresholdConfirmations { .. } => {}, + _ => panic!("Unexpected balance state!"), + } } - }); - } + } - generate_blocks_and_wait(bitcoind, electrs, 6); - sync_wallets!(); + generate_blocks_and_wait(bitcoind, electrs, 6).await; + sync_wallets!(); - reorg!(reorg_depth); - sync_wallets!(); + reorg!(reorg_depth); + sync_wallets!(); - let fee_sat = 7000; - // Check balance after close channel - nodes.iter().for_each(|node| { - assert!(node.list_balances().spendable_onchain_balance_sats > amount_sat - fee_sat); - assert!(node.list_balances().spendable_onchain_balance_sats < amount_sat); + let fee_sat = 7000; + // Check balance after close channel + nodes.iter().for_each(|node| { + assert!(node.list_balances().spendable_onchain_balance_sats > amount_sat - fee_sat); + assert!(node.list_balances().spendable_onchain_balance_sats < amount_sat); - assert_eq!(node.list_balances().total_anchor_channels_reserve_sats, 0); - assert!(node.list_balances().lightning_balances.is_empty()); + assert_eq!(node.list_balances().total_anchor_channels_reserve_sats, 0); + assert!(node.list_balances().lightning_balances.is_empty()); - assert_eq!(node.next_event(), None); - }); + 
assert_eq!(node.next_event(), None); + }); + }) } } From 8512c26035109f78b15b8e46be9ffc41037690bd Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 20 Oct 2025 09:16:08 -0500 Subject: [PATCH 125/184] Add `async { ..await }` in remaining places Since it seems to make a difference to `tokio` (see https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures are always put in an `async` closure. --- src/runtime.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/runtime.rs b/src/runtime.rs index 2275d5bea..1e9883ae4 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -67,7 +67,10 @@ impl Runtime { { let mut background_tasks = self.background_tasks.lock().unwrap(); let runtime_handle = self.handle(); - background_tasks.spawn_on(future, runtime_handle); + // Since it seems to make a difference to `tokio` (see + // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures + // are always put in an `async` / `.await` closure. + background_tasks.spawn_on(async { future.await }, runtime_handle); } pub fn spawn_cancellable_background_task(&self, future: F) @@ -76,7 +79,10 @@ impl Runtime { { let mut cancellable_background_tasks = self.cancellable_background_tasks.lock().unwrap(); let runtime_handle = self.handle(); - cancellable_background_tasks.spawn_on(future, runtime_handle); + // Since it seems to make a difference to `tokio` (see + // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures + // are always put in an `async` / `.await` closure. + cancellable_background_tasks.spawn_on(async { future.await }, runtime_handle); } pub fn spawn_background_processor_task(&self, future: F) @@ -107,7 +113,10 @@ impl Runtime { // to detect the outer context here, and otherwise use whatever was set during // initialization. 
 		let handle = tokio::runtime::Handle::try_current().unwrap_or(self.handle().clone());
-		tokio::task::block_in_place(move || handle.block_on(future))
+		// Since it seems to make a difference to `tokio` (see
+		// https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures
+		// are always put in an `async` / `.await` closure.
+		tokio::task::block_in_place(move || handle.block_on(async { future.await }))
 	}
 
 	pub fn abort_cancellable_background_tasks(&self) {
@@ -154,6 +163,9 @@ impl Runtime {
 			self.background_processor_task.lock().unwrap().take()
 		{
 			let abort_handle = background_processor_task.abort_handle();
+			// Since it seems to make a difference to `tokio` (see
+			// https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures
+			// are always put in an `async` / `.await` closure.
 			let timeout_res = self.block_on(async {
 				tokio::time::timeout(
 					Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS),

From e01ea056c865ff3ffb12b38e2438cdb838ff019e Mon Sep 17 00:00:00 2001
From: Elias Rohrer <tnull@noreply.github.com>
Date: Mon, 20 Oct 2025 15:26:58 -0500
Subject: [PATCH 126/184] Re-introduce internal runtime to `VssStore`

In order to avoid the recently discovered blocking-task-deadlock (in
which the task holding the runtime reactor got blocked and hence stopped
polling VSS write tasks), we re-introduce an internal runtime to the
`VssStore`, on which we spawn the tasks, while still using `block_on` of
our regular runtime for async-sync conversions.

This also finally fixes our VSS CI.
--- src/io/vss_store.rs | 218 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 169 insertions(+), 49 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 134ff7af2..4f90dd6db 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -11,7 +11,7 @@ use std::future::Future; #[cfg(test)] use std::panic::RefUnwindSafe; use std::pin::Pin; -use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; @@ -44,6 +44,11 @@ type CustomRetryPolicy = FilteredRetryPolicy< Box bool + 'static + Send + Sync>, >; +// We set this to a small number of threads that would still allow to make some progress if one +// would hit a blocking case +const INTERNAL_RUNTIME_WORKERS: usize = 2; +const VSS_IO_TIMEOUT: Duration = Duration::from_secs(5); + /// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend. pub struct VssStore { inner: Arc, @@ -51,6 +56,13 @@ pub struct VssStore { // operations aren't sensitive to the order of execution. next_version: AtomicU64, runtime: Arc, + // A VSS-internal runtime we use to avoid any deadlocks we could hit when waiting on a spawned + // blocking task to finish while the blocked thread had acquired the reactor. In particular, + // this works around a previously-hit case where a concurrent call to + // `PeerManager::process_pending_events` -> `ChannelManager::get_and_clear_pending_msg_events` + // would deadlock when trying to acquire sync `Mutex` locks that are held by the thread + // currently being blocked waiting on the VSS operation to finish. 
+ internal_runtime: Option, } impl VssStore { @@ -60,7 +72,21 @@ impl VssStore { ) -> Self { let inner = Arc::new(VssStoreInner::new(base_url, store_id, vss_seed, header_provider)); let next_version = AtomicU64::new(1); - Self { inner, next_version, runtime } + let internal_runtime = Some( + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name_fn(|| { + static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); + format!("ldk-node-vss-runtime-{}", id) + }) + .worker_threads(INTERNAL_RUNTIME_WORKERS) + .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) + .build() + .unwrap(), + ); + + Self { inner, next_version, runtime, internal_runtime } } // Same logic as for the obfuscated keys below, but just for locking, using the plaintext keys @@ -94,46 +120,122 @@ impl KVStoreSync for VssStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { - let fut = self.inner.read_internal(primary_namespace, secondary_namespace, key); - self.runtime.block_on(fut) + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = + async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }; + // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always + // times out. + let spawned_fut = internal_runtime.spawn(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::read timed out"; + Error::new(ErrorKind::Other, msg) + }) + }); + self.runtime.block_on(spawned_fut).expect("We should always finish")? 
} fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { - let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); - let fut = self.inner.write_internal( - inner_lock_ref, - locking_key, - version, - primary_namespace, - secondary_namespace, - key, - buf, - ); - self.runtime.block_on(fut) + let fut = async move { + inner + .write_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ) + .await + }; + // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always + // times out. + let spawned_fut = internal_runtime.spawn(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::write timed out"; + Error::new(ErrorKind::Other, msg) + }) + }); + self.runtime.block_on(spawned_fut).expect("We should always finish")? 
} fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result<()> { - let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); - let fut = self.inner.remove_internal( - inner_lock_ref, - locking_key, - version, - primary_namespace, - secondary_namespace, - key, - ); - self.runtime.block_on(fut) + let fut = async move { + inner + .remove_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + ) + .await + }; + // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always + // times out. + let spawned_fut = internal_runtime.spawn(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::remove timed out"; + Error::new(ErrorKind::Other, msg) + }) + }); + self.runtime.block_on(spawned_fut).expect("We should always finish")? 
} fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { - let fut = self.inner.list_internal(primary_namespace, secondary_namespace); - self.runtime.block_on(fut) + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let inner = Arc::clone(&self.inner); + let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await }; + // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always + // times out. + let spawned_fut = internal_runtime.spawn(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::list timed out"; + Error::new(ErrorKind::Other, msg) + }) + }); + self.runtime.block_on(spawned_fut).expect("We should always finish")? 
} } @@ -145,9 +247,9 @@ impl KVStore for VssStore { let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); - Box::pin(async move { - inner.read_internal(&primary_namespace, &secondary_namespace, &key).await - }) + Box::pin( + async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }, + ) } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, @@ -164,9 +266,9 @@ impl KVStore for VssStore { inner_lock_ref, locking_key, version, - &primary_namespace, - &secondary_namespace, - &key, + primary_namespace, + secondary_namespace, + key, buf, ) .await @@ -187,9 +289,9 @@ impl KVStore for VssStore { inner_lock_ref, locking_key, version, - &primary_namespace, - &secondary_namespace, - &key, + primary_namespace, + secondary_namespace, + key, ) .await }) @@ -200,7 +302,14 @@ impl KVStore for VssStore { let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); - Box::pin(async move { inner.list_internal(&primary_namespace, &secondary_namespace).await }) + Box::pin(async move { inner.list_internal(primary_namespace, secondary_namespace).await }) + } +} + +impl Drop for VssStore { + fn drop(&mut self) { + let internal_runtime = self.internal_runtime.take(); + tokio::task::block_in_place(move || drop(internal_runtime)); } } @@ -300,11 +409,12 @@ impl VssStoreInner { } async fn read_internal( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + &self, primary_namespace: String, secondary_namespace: String, key: String, ) -> io::Result> { - check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; + check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; - let obfuscated_key = self.build_obfuscated_key(primary_namespace, secondary_namespace, key); + let obfuscated_key = + 
self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); let request = GetObjectRequest { store_id: self.store_id.clone(), key: obfuscated_key }; let resp = self.client.get_object(&request).await.map_err(|e| { let msg = format!( @@ -332,13 +442,18 @@ impl VssStoreInner { async fn write_internal( &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, ) -> io::Result<()> { - check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "write", + )?; self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { let obfuscated_key = - self.build_obfuscated_key(primary_namespace, secondary_namespace, key); + self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); let vss_version = -1; let storable = self.storable_builder.build(buf, vss_version); let request = PutObjectRequest { @@ -367,13 +482,18 @@ impl VssStoreInner { async fn remove_internal( &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: &str, secondary_namespace: &str, key: &str, + primary_namespace: String, secondary_namespace: String, key: String, ) -> io::Result<()> { - check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "remove", + )?; self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { let obfuscated_key = - self.build_obfuscated_key(primary_namespace, secondary_namespace, key); + self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); let request = DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(KeyValue { key: obfuscated_key, 
version: -1, value: vec![] }), @@ -393,12 +513,12 @@ impl VssStoreInner { } async fn list_internal( - &self, primary_namespace: &str, secondary_namespace: &str, + &self, primary_namespace: String, secondary_namespace: String, ) -> io::Result> { - check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; + check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?; let keys = - self.list_all_keys(primary_namespace, secondary_namespace).await.map_err(|e| { + self.list_all_keys(&primary_namespace, &secondary_namespace).await.map_err(|e| { let msg = format!( "Failed to retrieve keys in namespace: {}/{} : {}", primary_namespace, secondary_namespace, e From 8cad63b3b022e27ee17315f75cfa30adcdbf3a95 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 20 Oct 2025 15:33:37 -0500 Subject: [PATCH 127/184] Fix and run all tests under `cfg(vss_test)` .. we previously avoided running some tests which turned out to be broken. --- .github/workflows/vss-integration.yml | 1 + src/io/vss_store.rs | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index 5f6e6065b..8473ed413 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -44,4 +44,5 @@ jobs: run: | cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" + RUSTFLAGS="--cfg vss_test" cargo test io::vss_store RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 4f90dd6db..ed8e13890 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -606,38 +606,40 @@ mod tests { use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng, RngCore}; - use tokio::runtime; use vss_client::headers::FixedHeaders; use super::*; use crate::io::test_utils::do_read_write_remove_list_persist; + use crate::logger::Logger; #[test] fn 
vss_read_write_remove_list_persist() { - let runtime = Arc::new(Runtime::new().unwrap()); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); let mut rng = thread_rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let logger = Arc::new(Logger::new_log_facade()); + let runtime = Arc::new(Runtime::new(logger).unwrap()); let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime).unwrap(); + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); do_read_write_remove_list_persist(&vss_store); } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn vss_read_write_remove_list_persist_in_runtime_context() { - let runtime = Arc::new(Runtime::new().unwrap()); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); let mut rng = thread_rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let logger = Arc::new(Logger::new_log_facade()); + let runtime = Arc::new(Runtime::new(logger).unwrap()); let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime).unwrap(); + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); do_read_write_remove_list_persist(&vss_store); drop(vss_store) From ce4c5847200efc23ba5fb8dba8f2b13dae018ee7 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 28 Oct 2025 10:38:32 +0100 Subject: [PATCH 128/184] Bump `electrsd` to 0.36.1 We bump our `electrsd` dependency to the latest version, fixing CI. 
--- Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a70e74dd4..5876efc48 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,11 +116,11 @@ proptest = "1.0.0" regex = "1.5.6" [target.'cfg(not(no_download))'.dev-dependencies] -electrsd = { version = "0.35.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } +electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } [target.'cfg(no_download)'.dev-dependencies] -electrsd = { version = "0.35.0", default-features = false, features = ["legacy"] } -corepc-node = { version = "0.8.0", default-features = false, features = ["27_2"] } +electrsd = { version = "0.36.1", default-features = false, features = ["legacy"] } +corepc-node = { version = "0.10.0", default-features = false, features = ["27_2"] } [target.'cfg(cln_test)'.dev-dependencies] clightningrpc = { version = "0.3.0-beta.8", default-features = false } From 76194c7ee78fc38f1a3efcaf93528102be4ba1c3 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 29 Oct 2025 09:00:50 +0100 Subject: [PATCH 129/184] Upgrade `rand` to v0.9.2 We bump our rand dependency to the latest stable version. 
--- Cargo.toml | 2 +- src/event.rs | 4 ++-- src/io/test_utils.rs | 6 +++--- src/io/utils.rs | 6 +++--- src/io/vss_store.rs | 10 +++++----- src/lib.rs | 2 +- src/liquidity.rs | 2 +- src/payment/bolt12.rs | 6 +++--- tests/common/mod.rs | 14 +++++++------- tests/integration_tests_cln.rs | 6 +++--- 10 files changed, 29 insertions(+), 29 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a70e74dd4..794f7b859 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,7 +90,7 @@ bip39 = "2.0.0" bip21 = { version = "0.5", features = ["std"], default-features = false } base64 = { version = "0.22.1", default-features = false, features = ["std"] } -rand = "0.8.5" +rand = { version = "0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] } chrono = { version = "0.4", default-features = false, features = ["clock"] } tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } esplora-client = { version = "0.12", default-features = false, features = ["tokio", "async-https-rustls"] } diff --git a/src/event.rs b/src/event.rs index eedfb1c14..13913466c 100644 --- a/src/event.rs +++ b/src/event.rs @@ -30,7 +30,7 @@ use lightning::util::persist::KVStoreSync; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use lightning_liquidity::lsps2::utils::compute_opening_fee; use lightning_types::payment::{PaymentHash, PaymentPreimage}; -use rand::{thread_rng, Rng}; +use rand::{rng, Rng}; use crate::config::{may_announce_channel, Config}; use crate::connection::ConnectionManager; @@ -1137,7 +1137,7 @@ where } } - let user_channel_id: u128 = thread_rng().gen::(); + let user_channel_id: u128 = rng().random::(); let allow_0conf = self.config.trusted_peers_0conf.contains(&counterparty_node_id); let mut channel_override_config = None; if let Some((lsp_node_id, _)) = self diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 59ad09458..84517a695 100644 --- a/src/io/test_utils.rs +++ 
b/src/io/test_utils.rs @@ -18,8 +18,8 @@ use lightning::util::persist::{ }; use lightning::util::test_utils; use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister< &'a K, @@ -34,7 +34,7 @@ const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5; pub(crate) fn random_storage_path() -> PathBuf { let mut temp_path = std::env::temp_dir(); - let mut rng = thread_rng(); + let mut rng = rng(); let rand_dir: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); temp_path.push(rand_dir); temp_path diff --git a/src/io/utils.rs b/src/io/utils.rs index 98993ff11..c723ca26b 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -35,7 +35,7 @@ use lightning::util::persist::{ }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; use lightning_types::string::PrintableString; -use rand::{thread_rng, RngCore}; +use rand::{rng, RngCore}; use super::*; use crate::chain::ChainSource; @@ -63,7 +63,7 @@ pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_sc pub fn generate_entropy_mnemonic() -> Mnemonic { // bip39::Mnemonic supports 256 bit entropy max let mut entropy = [0; 32]; - thread_rng().fill_bytes(&mut entropy); + rng().fill_bytes(&mut entropy); Mnemonic::from_entropy(&entropy).unwrap() } @@ -96,7 +96,7 @@ where Ok(key) } else { let mut key = [0; WALLET_KEYS_SEED_LEN]; - thread_rng().fill_bytes(&mut key); + rng().fill_bytes(&mut key); if let Some(parent_dir) = Path::new(&keys_seed_path).parent() { fs::create_dir_all(parent_dir).map_err(|e| { diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index ed8e13890..d97c47f81 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -592,7 +592,7 @@ pub(crate) struct RandEntropySource; impl EntropySource for RandEntropySource { fn fill_bytes(&self, buffer: &mut [u8]) { - 
rand::thread_rng().fill_bytes(buffer); + rand::rng().fill_bytes(buffer); } } @@ -604,8 +604,8 @@ impl RefUnwindSafe for VssStore {} mod tests { use std::collections::HashMap; - use rand::distributions::Alphanumeric; - use rand::{thread_rng, Rng, RngCore}; + use rand::distr::Alphanumeric; + use rand::{rng, Rng, RngCore}; use vss_client::headers::FixedHeaders; use super::*; @@ -615,7 +615,7 @@ mod tests { #[test] fn vss_read_write_remove_list_persist() { let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); - let mut rng = thread_rng(); + let mut rng = rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); @@ -631,7 +631,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn vss_read_write_remove_list_persist_in_runtime_context() { let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); - let mut rng = thread_rng(); + let mut rng = rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); diff --git a/src/lib.rs b/src/lib.rs index 6a26c6c5b..7d8aff4b3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1117,7 +1117,7 @@ impl Node { } let push_msat = push_to_counterparty_msat.unwrap_or(0); - let user_channel_id: u128 = rand::thread_rng().gen::(); + let user_channel_id: u128 = rand::rng().random::(); match self.channel_manager.create_channel( peer_info.node_id, diff --git a/src/liquidity.rs b/src/liquidity.rs index 81d48e530..401222c47 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -562,7 +562,7 @@ where return; }; - let user_channel_id: u128 = rand::thread_rng().gen::(); + let user_channel_id: u128 = rand::rng().random::(); let intercept_scid = self.channel_manager.get_intercept_scid(); if let Some(payment_size_msat) = payment_size_msat { diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 337eedf96..2b299739d 
100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -84,7 +84,7 @@ impl Bolt12Payment { let offer = maybe_deref(offer); let mut random_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut random_bytes); + rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let route_params_config = RouteParametersConfig::default(); @@ -191,7 +191,7 @@ impl Bolt12Payment { let offer = maybe_deref(offer); let mut random_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut random_bytes); + rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let route_params_config = RouteParametersConfig::default(); @@ -408,7 +408,7 @@ impl Bolt12Payment { payer_note: Option, ) -> Result { let mut random_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut random_bytes); + rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 05326b03d..c96ab8b36 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -43,8 +43,8 @@ use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_persister::fs_store::FilesystemStore; use lightning_types::payment::{PaymentHash, PaymentPreimage}; use logging::TestLogWriter; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; use serde_json::{json, Value}; macro_rules! 
expect_event { @@ -191,15 +191,15 @@ pub(crate) fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) { pub(crate) fn random_storage_path() -> PathBuf { let mut temp_path = std::env::temp_dir(); - let mut rng = thread_rng(); + let mut rng = rng(); let rand_dir: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); temp_path.push(rand_dir); temp_path } pub(crate) fn random_port() -> u16 { - let mut rng = thread_rng(); - rng.gen_range(5000..32768) + let mut rng = rng(); + rng.random_range(5000..32768) } pub(crate) fn random_listening_addresses() -> Vec { @@ -216,8 +216,8 @@ pub(crate) fn random_listening_addresses() -> Vec { } pub(crate) fn random_node_alias() -> Option { - let mut rng = thread_rng(); - let rand_val = rng.gen_range(0..1000); + let mut rng = rng(); + let rand_val = rng.random_range(0..1000); let alias = format!("ldk-node-{}", rand_val); let mut bytes = [0u8; 32]; bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 38e345f15..e8eb72a1d 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -22,8 +22,8 @@ use ldk_node::bitcoin::Amount; use ldk_node::lightning::ln::msgs::SocketAddress; use ldk_node::{Builder, Event}; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_cln() { @@ -99,7 +99,7 @@ async fn test_cln() { let user_channel_id = common::expect_channel_ready_event!(node, cln_node_id); // Send a payment to CLN - let mut rng = thread_rng(); + let mut rng = rng(); let rand_label: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let cln_invoice = cln_client.invoice(Some(10_000_000), &rand_label, &rand_label, None, None, None).unwrap(); From 
e1739971794d668531916cdd58e798b7e9d10e67 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 29 Oct 2025 09:02:47 +0100 Subject: [PATCH 130/184] Use `os_rng` for seed/mnemonic generation The previously-used `thread_rng` should be fine, but `os_rng` is guaranteed to block until there is sufficient entropy available (e.g., after startup), which might slightly improve security here. --- src/io/utils.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/io/utils.rs b/src/io/utils.rs index c723ca26b..d92c9486b 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -35,7 +35,8 @@ use lightning::util::persist::{ }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; use lightning_types::string::PrintableString; -use rand::{rng, RngCore}; +use rand::rngs::OsRng; +use rand::TryRngCore; use super::*; use crate::chain::ChainSource; @@ -63,7 +64,7 @@ pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_sc pub fn generate_entropy_mnemonic() -> Mnemonic { // bip39::Mnemonic supports 256 bit entropy max let mut entropy = [0; 32]; - rng().fill_bytes(&mut entropy); + OsRng.try_fill_bytes(&mut entropy).expect("Failed to generate entropy"); Mnemonic::from_entropy(&entropy).unwrap() } @@ -96,7 +97,10 @@ where Ok(key) } else { let mut key = [0; WALLET_KEYS_SEED_LEN]; - rng().fill_bytes(&mut key); + OsRng.try_fill_bytes(&mut key).map_err(|e| { + log_error!(logger, "Failed to generate entropy: {}", e); + std::io::Error::new(std::io::ErrorKind::Other, "Failed to generate seed bytes") + })?; if let Some(parent_dir) = Path::new(&keys_seed_path).parent() { fs::create_dir_all(parent_dir).map_err(|e| { From 7063f2c0b09776c4f89b7c7de06c99e7b98e09fe Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 30 Oct 2025 09:45:32 +0100 Subject: [PATCH 131/184] No turbofishing for `user_channel_id` --- src/event.rs | 2 +- src/lib.rs | 2 +- src/liquidity.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/src/event.rs b/src/event.rs index 13913466c..1946350a3 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1137,7 +1137,7 @@ where } } - let user_channel_id: u128 = rng().random::(); + let user_channel_id: u128 = rng().random(); let allow_0conf = self.config.trusted_peers_0conf.contains(&counterparty_node_id); let mut channel_override_config = None; if let Some((lsp_node_id, _)) = self diff --git a/src/lib.rs b/src/lib.rs index 7d8aff4b3..482866a14 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1117,7 +1117,7 @@ impl Node { } let push_msat = push_to_counterparty_msat.unwrap_or(0); - let user_channel_id: u128 = rand::rng().random::(); + let user_channel_id: u128 = rand::rng().random(); match self.channel_manager.create_channel( peer_info.node_id, diff --git a/src/liquidity.rs b/src/liquidity.rs index 401222c47..57e2ad488 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -562,7 +562,7 @@ where return; }; - let user_channel_id: u128 = rand::rng().random::(); + let user_channel_id: u128 = rand::rng().random(); let intercept_scid = self.channel_manager.get_intercept_scid(); if let Some(payment_size_msat) = payment_size_msat { From 126c5da999e1d136aa527e143d7b7d098f3544e6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 30 Oct 2025 13:05:42 +0100 Subject: [PATCH 132/184] Add simple workflow to generate Swift bindings in CI We add a simple workflow to generate the Swift bindings in CI, ensuring the build succeeds. 
--- .github/workflows/swift.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/swift.yml diff --git a/.github/workflows/swift.yml b/.github/workflows/swift.yml new file mode 100644 index 000000000..3410d09aa --- /dev/null +++ b/.github/workflows/swift.yml @@ -0,0 +1,21 @@ +name: CI Checks - Swift Tests + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + check-swift: + runs-on: macos-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set default Rust version to stable + run: rustup default stable + + - name: Generate Swift bindings + run: ./scripts/uniffi_bindgen_generate_swift.sh From 15a1b90aee78d74abb5c0ae8f63a2a23665a7a8f Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 30 Oct 2025 13:10:12 +0100 Subject: [PATCH 133/184] Ensure for all workflows we cancel in-progress jobs on repeated pushes .. no need to keep jobs running if we pushed once more. 
--- .github/workflows/cln-integration.yml | 4 ++++ .github/workflows/kotlin.yml | 4 ++++ .github/workflows/lnd-integration.yml | 6 +++++- .github/workflows/python.yml | 4 ++++ 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cln-integration.yml b/.github/workflows/cln-integration.yml index 2c427cbde..32e7b74c0 100644 --- a/.github/workflows/cln-integration.yml +++ b/.github/workflows/cln-integration.yml @@ -2,6 +2,10 @@ name: CI Checks - CLN Integration Tests on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: check-cln: runs-on: ubuntu-latest diff --git a/.github/workflows/kotlin.yml b/.github/workflows/kotlin.yml index a1711ba49..01a840d60 100644 --- a/.github/workflows/kotlin.yml +++ b/.github/workflows/kotlin.yml @@ -2,6 +2,10 @@ name: CI Checks - Kotlin Tests on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: check-kotlin: runs-on: ubuntu-latest diff --git a/.github/workflows/lnd-integration.yml b/.github/workflows/lnd-integration.yml index 219e929b1..f913e92ad 100644 --- a/.github/workflows/lnd-integration.yml +++ b/.github/workflows/lnd-integration.yml @@ -2,6 +2,10 @@ name: CI Checks - LND Integration Tests on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: check-lnd: runs-on: ubuntu-latest @@ -49,4 +53,4 @@ jobs: run: LND_CERT_PATH=$LND_DATA_DIR/tls.cert LND_MACAROON_PATH=$LND_DATA_DIR/data/chain/bitcoin/regtest/admin.macaroon RUSTFLAGS="--cfg lnd_test" cargo test --test integration_tests_lnd -- --exact --show-output env: - LND_DATA_DIR: ${{ env.LND_DATA_DIR }} \ No newline at end of file + LND_DATA_DIR: ${{ env.LND_DATA_DIR }} diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 21c0139a2..d9bc978d1 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -2,6 
+2,10 @@ name: CI Checks - Python Tests on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: check-python: runs-on: ubuntu-latest From faebb30d08151ec6bc654da927c084d7aa21656a Mon Sep 17 00:00:00 2001 From: benthecarman Date: Tue, 28 Oct 2025 13:41:05 -0500 Subject: [PATCH 134/184] Fix bitcoind shutdown hang Previously, the shutdown process could hang when using the bitcoind backend because the syncing process was not always checking if we had received the shutdown signal. Now with any future we call during the sync process we will use a `tokio::select!` to make sure we abort early if we receive a stop signal. --- src/chain/bitcoind.rs | 70 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 60 insertions(+), 10 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index a0151e5a2..4b7cd588f 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -147,6 +147,12 @@ impl BitcoindChainSource { const MAX_BACKOFF_SECS: u64 = 300; loop { + // if the stop_sync_sender has been dropped, we should just exit + if stop_sync_receiver.has_changed().unwrap_or(true) { + log_trace!(self.logger, "Stopping initial chain sync."); + return; + } + let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; let onchain_wallet_best_block_hash = @@ -226,7 +232,18 @@ impl BitcoindChainSource { e, backoff ); - tokio::time::sleep(Duration::from_secs(backoff)).await; + // Sleep with stop signal check to allow immediate shutdown + tokio::select! 
{ + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping initial chain sync.", + ); + return; + } + _ = tokio::time::sleep(Duration::from_secs(backoff)) => {} + } backoff = std::cmp::min(backoff * 2, MAX_BACKOFF_SECS); } else { log_error!( @@ -235,7 +252,18 @@ impl BitcoindChainSource { e, MAX_BACKOFF_SECS ); - tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)).await; + // Sleep with stop signal check to allow immediate shutdown + tokio::select! { + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping initial chain sync during backoff.", + ); + return; + } + _ = tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)) => {} + } } }, } @@ -260,6 +288,7 @@ impl BitcoindChainSource { let mut last_best_block_hash = None; loop { tokio::select! { + biased; _ = stop_sync_receiver.changed() => { log_trace!( self.logger, @@ -268,17 +297,38 @@ impl BitcoindChainSource { return; } _ = chain_polling_interval.tick() => { - let _ = self.poll_and_update_listeners( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&output_sweeper) - ).await; + tokio::select! { + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping polling for new chain data.", + ); + return; + } + _ = self.poll_and_update_listeners( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&output_sweeper) + ) => {} + } } _ = fee_rate_update_interval.tick() => { if last_best_block_hash != Some(channel_manager.current_best_block().block_hash) { - let update_res = self.update_fee_rate_estimates().await; - if update_res.is_ok() { - last_best_block_hash = Some(channel_manager.current_best_block().block_hash); + tokio::select! 
{ + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping polling for new chain data.", + ); + return; + } + update_res = self.update_fee_rate_estimates() => { + if update_res.is_ok() { + last_best_block_hash = Some(channel_manager.current_best_block().block_hash); + } + } } } } From 916e0ef3843aead2b7e02c3cb1b33a1154c20144 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 31 Oct 2025 11:33:37 +0100 Subject: [PATCH 135/184] Bump LDK to 2.0.0-rc1 We bump the LDK dependencies to the just-released 2.0 release candidate and account for minor last-minute API changes. --- Cargo.toml | 24 ++++++++++++------------ src/data_store.rs | 1 + src/io/sqlite_store/mod.rs | 4 ++-- src/io/test_utils.rs | 4 ++-- src/io/vss_store.rs | 4 ++-- tests/common/mod.rs | 30 ++++++++++++++++++++---------- 6 files changed, 39 insertions(+), 28 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5876efc48..51b0329c4 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,17 +29,17 @@ panic = 'abort' # Abort on panic default = [] [dependencies] -lightning = { version = "0.2.0-beta1", features = ["std"] } -lightning-types = { version = "0.3.0-beta1" } -lightning-invoice = { version = "0.34.0-beta1", features = ["std"] } -lightning-net-tokio = { version = "0.2.0-beta1" } -lightning-persister = { version = "0.2.0-beta1", features = ["tokio"] } -lightning-background-processor = { version = "0.2.0-beta1" } -lightning-rapid-gossip-sync = { version = "0.2.0-beta1" } -lightning-block-sync = { version = "0.2.0-beta1", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { version = "0.2.0-beta1", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { version = "0.2.0-beta1", features = ["std"] } -lightning-macros = { version = "0.2.0-beta1" } +lightning = { version = "0.2.0-rc1", features = ["std"] } +lightning-types = { version = "0.3.0-rc1" } +lightning-invoice = { version = "0.34.0-rc1", features = 
["std"] } +lightning-net-tokio = { version = "0.2.0-rc1" } +lightning-persister = { version = "0.2.0-rc1", features = ["tokio"] } +lightning-background-processor = { version = "0.2.0-rc1" } +lightning-rapid-gossip-sync = { version = "0.2.0-rc1" } +lightning-block-sync = { version = "0.2.0-rc1", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { version = "0.2.0-rc1", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { version = "0.2.0-rc1", features = ["std"] } +lightning-macros = { version = "0.2.0-rc1" } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } @@ -108,7 +108,7 @@ prost = { version = "0.11.6", default-features = false} winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.2.0-beta1", features = ["std", "_test_utils"] } +lightning = { version = "0.2.0-rc1", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } diff --git a/src/data_store.rs b/src/data_store.rs index ce4b294e0..83cbf4476 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -103,6 +103,7 @@ where &self.primary_namespace, &self.secondary_namespace, &store_key, + false, ) .map_err(|e| { log_error!( diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index c41df8ea0..789330cef 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -137,7 +137,7 @@ impl KVStore for SqliteStore { } fn remove( - &self, primary_namespace: &str, 
secondary_namespace: &str, key: &str, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> Pin> + Send>> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -205,7 +205,7 @@ impl KVStoreSync for SqliteStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 59ad09458..059e66aee 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -68,7 +68,7 @@ pub(crate) fn do_read_write_remove_list_persist( let read_data = kv_store.read(primary_namespace, secondary_namespace, key).unwrap(); assert_eq!(data, &*read_data); - kv_store.remove(primary_namespace, secondary_namespace, key).unwrap(); + kv_store.remove(primary_namespace, secondary_namespace, key, false).unwrap(); let listed_keys = kv_store.list(primary_namespace, secondary_namespace).unwrap(); assert_eq!(listed_keys.len(), 0); @@ -84,7 +84,7 @@ pub(crate) fn do_read_write_remove_list_persist( let read_data = kv_store.read(&max_chars, &max_chars, &max_chars).unwrap(); assert_eq!(data, &*read_data); - kv_store.remove(&max_chars, &max_chars, &max_chars).unwrap(); + kv_store.remove(&max_chars, &max_chars, &max_chars, false).unwrap(); let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap(); assert_eq!(listed_keys.len(), 0); diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index ed8e13890..028eb87e4 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -181,7 +181,7 @@ impl KVStoreSync for VssStore { } fn remove( - &self, primary_namespace: &str, 
secondary_namespace: &str, key: &str, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> io::Result<()> { let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); @@ -275,7 +275,7 @@ impl KVStore for VssStore { }) } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, ) -> Pin> + Send>> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 05326b03d..058a8df19 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1244,14 +1244,14 @@ impl KVStore for TestSyncStore { }) } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Pin> + Send>> { let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); let fut = tokio::task::spawn_blocking(move || { - inner.remove_internal(&primary_namespace, &secondary_namespace, &key) + inner.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy) }); Box::pin(async move { fut.await.unwrap_or_else(|e| { @@ -1292,9 +1292,9 @@ impl KVStoreSync for TestSyncStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> lightning::io::Result<()> { - self.inner.remove_internal(primary_namespace, secondary_namespace, key) + self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy) } fn list( @@ -1432,15 +1432,25 @@ impl TestSyncStoreInner { } fn 
remove_internal( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); let fs_res = - KVStoreSync::remove(&self.fs_store, primary_namespace, secondary_namespace, key); - let sqlite_res = - KVStoreSync::remove(&self.sqlite_store, primary_namespace, secondary_namespace, key); - let test_res = - KVStoreSync::remove(&self.test_store, primary_namespace, secondary_namespace, key); + KVStoreSync::remove(&self.fs_store, primary_namespace, secondary_namespace, key, lazy); + let sqlite_res = KVStoreSync::remove( + &self.sqlite_store, + primary_namespace, + secondary_namespace, + key, + lazy, + ); + let test_res = KVStoreSync::remove( + &self.test_store, + primary_namespace, + secondary_namespace, + key, + lazy, + ); assert!(!self .do_list(primary_namespace, secondary_namespace) From c554a242bc903776ec2122ebaeedc4b2c6b530a3 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 30 Oct 2025 14:23:20 +0100 Subject: [PATCH 136/184] Avoid collision of `description` on `Offer`, `Refund`, `Bolt12Invoice` Swift will auto-generate a `description` accessor. We here avoid any conflicting methods in our API. --- bindings/ldk_node.udl | 6 +++--- src/ffi/types.rs | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 9da0d89b6..ab2f483a1 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -771,7 +771,7 @@ interface Offer { constructor([ByRef] string offer_str); OfferId id(); boolean is_expired(); - string? description(); + string? offer_description(); string? issuer(); OfferAmount? amount(); boolean is_valid_quantity(u64 quantity); @@ -787,7 +787,7 @@ interface Offer { interface Refund { [Throws=NodeError, Name=from_str] constructor([ByRef] string refund_str); - string description(); + string refund_description(); u64? 
absolute_expiry_seconds(); boolean is_expired(); string? issuer(); @@ -810,7 +810,7 @@ interface Bolt12Invoice { u64? absolute_expiry_seconds(); u64 relative_expiry(); boolean is_expired(); - string? description(); + string? invoice_description(); string? issuer(); string? payer_note(); sequence? metadata(); diff --git a/src/ffi/types.rs b/src/ffi/types.rs index b64bd730e..3c88a665f 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -159,7 +159,7 @@ impl Offer { /// A complete description of the purpose of the payment. /// /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. - pub fn description(&self) -> Option { + pub fn offer_description(&self) -> Option { self.inner.description().map(|printable| printable.to_string()) } @@ -288,7 +288,7 @@ impl Refund { /// A complete description of the purpose of the refund. /// /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. - pub fn description(&self) -> String { + pub fn refund_description(&self) -> String { self.inner.description().to_string() } @@ -466,7 +466,7 @@ impl Bolt12Invoice { /// /// [`Offer::description`]: lightning::offers::offer::Offer::description /// [`Refund::description`]: lightning::offers::refund::Refund::description - pub fn description(&self) -> Option { + pub fn invoice_description(&self) -> Option { self.inner.description().map(|printable| printable.to_string()) } @@ -1359,7 +1359,7 @@ mod tests { #[test] fn test_offer() { let (ldk_offer, wrapped_offer) = create_test_offer(); - match (ldk_offer.description(), wrapped_offer.description()) { + match (ldk_offer.description(), wrapped_offer.offer_description()) { (Some(ldk_desc), Some(wrapped_desc)) => { assert_eq!(ldk_desc.to_string(), wrapped_desc); }, @@ -1481,7 +1481,7 @@ mod tests { fn test_refund_properties() { let (ldk_refund, wrapped_refund) = create_test_refund(); - assert_eq!(ldk_refund.description().to_string(), 
wrapped_refund.description()); + assert_eq!(ldk_refund.description().to_string(), wrapped_refund.refund_description()); assert_eq!(ldk_refund.amount_msats(), wrapped_refund.amount_msats()); assert_eq!(ldk_refund.is_expired(), wrapped_refund.is_expired()); @@ -1572,7 +1572,7 @@ mod tests { assert_eq!(ldk_invoice.relative_expiry().as_secs(), wrapped_invoice.relative_expiry()); - match (ldk_invoice.description(), wrapped_invoice.description()) { + match (ldk_invoice.description(), wrapped_invoice.invoice_description()) { (Some(ldk_desc), Some(wrapped_desc)) => { assert_eq!(ldk_desc.to_string(), wrapped_desc); }, From 2c99aa04e77bcb394ad4a8af20d70825f7aff306 Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Fri, 31 Oct 2025 13:27:56 +0100 Subject: [PATCH 137/184] Implement Display for UserChannelId --- src/types.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/types.rs b/src/types.rs index 800d9462d..71512b2cd 100644 --- a/src/types.rs +++ b/src/types.rs @@ -5,6 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::fmt; use std::sync::{Arc, Mutex}; use bitcoin::secp256k1::PublicKey; @@ -192,6 +193,12 @@ impl Readable for UserChannelId { } } +impl fmt::Display for UserChannelId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "UserChannelId({})", self.0) + } +} + /// Details of a channel as returned by [`Node::list_channels`]. /// /// [`Node::list_channels`]: crate::Node::list_channels From 222321d61242a635b5d77b423b25dd3c6ae9b196 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 15 Oct 2025 09:58:58 +0200 Subject: [PATCH 138/184] Configurable test store To enable more realistic testing with sqlite as a backend. 
--- tests/common/mod.rs | 45 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 058a8df19..0d6ba30ae 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -29,10 +29,11 @@ use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD}; use electrsd::{corepc_node, ElectrsD}; use electrum_client::ElectrumApi; use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; -use ldk_node::io::sqlite_store::SqliteStore; +use ldk_node::io::sqlite_store::{SqliteStore, KV_TABLE_NAME, SQLITE_DB_FILE_NAME}; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ - Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance, + Builder, CustomTlvRecord, DynStore, Event, LightningBalance, Node, NodeError, + PendingSweepBalance, }; use lightning::io; use lightning::ln::msgs::SocketAddress; @@ -262,10 +263,23 @@ pub(crate) enum TestChainSource<'a> { BitcoindRestSync(&'a BitcoinD), } +#[derive(Clone, Copy)] +pub(crate) enum TestStoreType { + TestSyncStore, + Sqlite, +} + +impl Default for TestStoreType { + fn default() -> Self { + TestStoreType::TestSyncStore + } +} + #[derive(Clone, Default)] pub(crate) struct TestConfig { pub node_config: Config, pub log_writer: TestLogWriter, + pub store_type: TestStoreType, } macro_rules! 
setup_builder { @@ -282,13 +296,28 @@ pub(crate) use setup_builder; pub(crate) fn setup_two_nodes( chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, +) -> (TestNode, TestNode) { + setup_two_nodes_with_store( + chain_source, + allow_0conf, + anchor_channels, + anchors_trusted_no_reserve, + TestStoreType::TestSyncStore, + ) +} + +pub(crate) fn setup_two_nodes_with_store( + chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, + anchors_trusted_no_reserve: bool, store_type: TestStoreType, ) -> (TestNode, TestNode) { println!("== Node A =="); - let config_a = random_config(anchor_channels); + let mut config_a = random_config(anchor_channels); + config_a.store_type = store_type; let node_a = setup_node(chain_source, config_a, None); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); + config_b.store_type = store_type; if allow_0conf { config_b.node_config.trusted_peers_0conf.push(node_a.node_id()); } @@ -381,8 +410,14 @@ pub(crate) fn setup_node_for_async_payments( builder.set_async_payments_role(async_payments_role).unwrap(); - let test_sync_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); - let node = builder.build_with_store(test_sync_store).unwrap(); + let node = match config.store_type { + TestStoreType::TestSyncStore => { + let kv_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); + builder.build_with_store(kv_store).unwrap() + }, + TestStoreType::Sqlite => builder.build().unwrap(), + }; + node.start().unwrap(); assert!(node.status().is_running); assert!(node.status().latest_fee_rate_cache_update_timestamp.is_some()); From 14c1cff0ec19ba6871c53cda7990395cdbc5edfc Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 21 Oct 2025 13:57:41 +0200 Subject: [PATCH 139/184] Add async payment throughput benchmark Introduces a criterion-based benchmark that sends 1000 concurrent payments between two LDK 
nodes to measure total duration. Also adds a CI job to automatically run the benchmark. --- .github/workflows/benchmarks.yml | 46 ++++++++ Cargo.toml | 5 + benches/payments.rs | 195 +++++++++++++++++++++++++++++++ tests/common/mod.rs | 5 +- 4 files changed, 248 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/benchmarks.yml create mode 100644 benches/payments.rs diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml new file mode 100644 index 000000000..ef049ad85 --- /dev/null +++ b/.github/workflows/benchmarks.yml @@ -0,0 +1,46 @@ +name: CI Checks - Benchmarks + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + benchmark: + runs-on: ubuntu-latest + env: + TOOLCHAIN: stable + steps: + - name: Checkout source code + uses: actions/checkout@v3 + - name: Install Rust toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + rustup override set stable + - name: Enable caching for bitcoind + id: cache-bitcoind + uses: actions/cache@v4 + with: + path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-${{ runner.os }}-${{ runner.arch }} + - name: Enable caching for electrs + id: cache-electrs + uses: actions/cache@v4 + with: + path: bin/electrs-${{ runner.os }}-${{ runner.arch }} + key: electrs-${{ runner.os }}-${{ runner.arch }} + - name: Download bitcoind/electrs + if: "(steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" + run: | + source ./scripts/download_bitcoind_electrs.sh + mkdir bin + mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} + - name: Set bitcoind/electrs environment variables + run: | + echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=$( 
pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + - name: Run benchmarks + run: | + cargo bench diff --git a/Cargo.toml b/Cargo.toml index 51b0329c4..701d9ddb3 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,6 +114,7 @@ lightning = { version = "0.2.0-rc1", features = ["std", "_test_utils"] } #lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" +criterion = { version = "0.7.0", features = ["async_tokio"] } [target.'cfg(not(no_download))'.dev-dependencies] electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } @@ -148,3 +149,7 @@ check-cfg = [ "cfg(cln_test)", "cfg(lnd_test)", ] + +[[bench]] +name = "payments" +harness = false diff --git a/benches/payments.rs b/benches/payments.rs new file mode 100644 index 000000000..75b7f0513 --- /dev/null +++ b/benches/payments.rs @@ -0,0 +1,195 @@ +#[path = "../tests/common/mod.rs"] +mod common; + +use std::time::Instant; +use std::{sync::Arc, time::Duration}; + +use bitcoin::hex::DisplayHex; +use bitcoin::Amount; +use common::{ + expect_channel_ready_event, generate_blocks_and_wait, premine_and_distribute_funds, + setup_bitcoind_and_electrsd, setup_two_nodes_with_store, TestChainSource, +}; +use criterion::{criterion_group, criterion_main, Criterion}; +use ldk_node::{Event, Node}; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; +use rand::RngCore; +use tokio::task::{self}; + +use crate::common::open_channel_push_amt; + +fn spawn_payment(node_a: Arc, node_b: Arc, amount_msat: u64) { + let mut preimage_bytes = [0u8; 32]; + rand::thread_rng().fill_bytes(&mut preimage_bytes); + let preimage = PaymentPreimage(preimage_bytes); + let payment_hash: PaymentHash = preimage.into(); + + // Spawn each payment as a separate async task + task::spawn(async move { + println!("{}: Starting payment", payment_hash.0.as_hex()); + + loop { + // Pre-check the HTLC slots to 
try to avoid the performance impact of a failed payment. + while node_a.list_channels()[0].next_outbound_htlc_limit_msat == 0 { + println!("{}: Waiting for HTLC slots to free up", payment_hash.0.as_hex()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + let payment_id = node_a.spontaneous_payment().send_with_preimage( + amount_msat, + node_b.node_id(), + preimage, + None, + ); + + match payment_id { + Ok(payment_id) => { + println!( + "{}: Awaiting payment with id {}", + payment_hash.0.as_hex(), + payment_id + ); + break; + }, + Err(e) => { + println!("{}: Payment attempt failed: {:?}", payment_hash.0.as_hex(), e); + + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + }, + } + } + }); +} + +async fn send_payments(node_a: Arc, node_b: Arc) -> std::time::Duration { + let start = Instant::now(); + + let total_payments = 1000; + let amount_msat = 10_000_000; + + let mut success_count = 0; + for _ in 0..total_payments { + spawn_payment(node_a.clone(), node_b.clone(), amount_msat); + } + + while success_count < total_payments { + match node_a.next_event_async().await { + Event::PaymentSuccessful { payment_id, payment_hash, .. } => { + if let Some(id) = payment_id { + success_count += 1; + println!("{}: Payment with id {:?} completed", payment_hash.0.as_hex(), id); + } else { + println!("Payment completed (no payment_id)"); + } + }, + Event::PaymentFailed { payment_id, payment_hash, .. } => { + println!("{}: Payment {:?} failed", payment_hash.unwrap().0.as_hex(), payment_id); + + // The payment failed, so we need to respawn it. + spawn_payment(node_a.clone(), node_b.clone(), amount_msat); + }, + ref e => { + println!("Received non-payment event: {:?}", e); + }, + } + + node_a.event_handled().unwrap(); + } + + let duration = start.elapsed(); + println!("Time elapsed: {:?}", duration); + + // Send back the money for the next iteration. 
+ let mut preimage_bytes = [0u8; 32]; + rand::thread_rng().fill_bytes(&mut preimage_bytes); + node_b + .spontaneous_payment() + .send_with_preimage( + amount_msat * total_payments, + node_a.node_id(), + PaymentPreimage(preimage_bytes), + None, + ) + .ok() + .unwrap(); + + duration +} + +fn payment_benchmark(c: &mut Criterion) { + // Set up two nodes. Because this is slow, we reuse the same nodes for each sample. + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + + let (node_a, node_b) = setup_two_nodes_with_store( + &chain_source, + false, + true, + false, + common::TestStoreType::Sqlite, + ); + + let runtime = + tokio::runtime::Builder::new_multi_thread().worker_threads(4).enable_all().build().unwrap(); + + let node_a = Arc::new(node_a); + let node_b = Arc::new(node_b); + + // Fund the nodes and setup a channel between them. The criterion function cannot be async, so we need to execute + // the setup using a runtime. + let node_a_cloned = Arc::clone(&node_a); + let node_b_cloned = Arc::clone(&node_b); + runtime.block_on(async move { + let address_a = node_a_cloned.onchain_payment().new_address().unwrap(); + let premine_sat = 25_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premine_sat), + ) + .await; + node_a_cloned.sync_wallets().unwrap(); + node_b_cloned.sync_wallets().unwrap(); + open_channel_push_amt( + &node_a_cloned, + &node_b_cloned, + 16_000_000, + Some(1_000_000_000), + false, + &electrsd, + ) + .await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + node_a_cloned.sync_wallets().unwrap(); + node_b_cloned.sync_wallets().unwrap(); + expect_channel_ready_event!(node_a_cloned, node_b_cloned.node_id()); + expect_channel_ready_event!(node_b_cloned, node_a_cloned.node_id()); + }); + + let mut group = c.benchmark_group("payments"); + group.sample_size(10); + + group.bench_function("payments", |b| 
{ + // Use custom timing so that sending back the money at the end of each iteration isn't included in the + // measurement. + b.to_async(&runtime).iter_custom(|iter| { + let node_a = Arc::clone(&node_a); + let node_b = Arc::clone(&node_b); + + async move { + let mut total = Duration::ZERO; + for _i in 0..iter { + let node_a = Arc::clone(&node_a); + let node_b = Arc::clone(&node_b); + + total += send_payments(node_a, node_b).await; + } + total + } + }); + }); +} + +criterion_group!(benches, payment_benchmark); +criterion_main!(benches); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 0d6ba30ae..dd680488c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -29,11 +29,10 @@ use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD}; use electrsd::{corepc_node, ElectrsD}; use electrum_client::ElectrumApi; use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; -use ldk_node::io::sqlite_store::{SqliteStore, KV_TABLE_NAME, SQLITE_DB_FILE_NAME}; +use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ - Builder, CustomTlvRecord, DynStore, Event, LightningBalance, Node, NodeError, - PendingSweepBalance, + Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance, }; use lightning::io; use lightning::ln::msgs::SocketAddress; From 2c72e671312edb61fa5a80612207238bafe932e6 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 5 Nov 2025 11:22:00 +0100 Subject: [PATCH 140/184] Use `[patch]` instead of switching all the dependencies We previously added a bunch of commented-out `rust-lightning` dependencies in our `Cargo.toml` to be able to easily switch between `rust-lightning` locations. However, this is exactly what the `[patch]` command is for, which in particular also allows to patch a dependency for the whole tree, not only this one project. 
Therefore, we move the examples to a commented-out `patch` section. --- Cargo.toml | 76 ++++++++++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 39 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 701d9ddb3..d91db014a 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,42 +41,6 @@ lightning-transaction-sync = { version = "0.2.0-rc1", features = ["esplora-async lightning-liquidity = { version = "0.2.0-rc1", features = ["std"] } lightning-macros = { version = "0.2.0-rc1" } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } -#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } -#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["tokio"] } -#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } - -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } -#lightning-types 
= { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std"] } -#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["tokio"] } -#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } -#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } - -#lightning = { path = "../rust-lightning/lightning", features = ["std"] } -#lightning-types = { path = "../rust-lightning/lightning-types" } -#lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } -#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } -#lightning-persister = { path = "../rust-lightning/lightning-persister", features = ["tokio"] } -#lightning-background-processor = { path = 
"../rust-lightning/lightning-background-processor" } -#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } -#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rest-client", "rpc-client", "tokio"] } -#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum-rustls-ring", "time"] } -#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } -#lightning-macros = { path = "../rust-lightning/lightning-macros" } - bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls-ring"]} @@ -109,9 +73,6 @@ winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] lightning = { version = "0.2.0-rc1", features = ["std", "_test_utils"] } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03", features = ["std", "_test_utils"] } -#lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } @@ -153,3 +114,40 @@ check-cfg = [ [[bench]] name = "payments" harness = false + +#[patch.crates-io] +#lightning = { path = "../rust-lightning/lightning" } +#lightning-types = { path = "../rust-lightning/lightning-types" } +#lightning-invoice = { path = "../rust-lightning/lightning-invoice" } +#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } +#lightning-persister = { path = "../rust-lightning/lightning-persister" } +#lightning-background-processor = { 
path = "../rust-lightning/lightning-background-processor" } +#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } +#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" } +#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } +#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } +#lightning-macros = { path = "../rust-lightning/lightning-macros" } + +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } + +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } 
+#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } From fb41112266fe0de6860020849d9cd118d5e74e07 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 5 Nov 2025 12:16:09 +0100 Subject: [PATCH 141/184] Elevate permissions of weekly `rustfmt` worflow .. which might be necessary for it to be able to run successfully. 
--- .github/workflows/cron-weekly-rustfmt.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/cron-weekly-rustfmt.yml b/.github/workflows/cron-weekly-rustfmt.yml index 626953c8e..d6326f03b 100644 --- a/.github/workflows/cron-weekly-rustfmt.yml +++ b/.github/workflows/cron-weekly-rustfmt.yml @@ -1,4 +1,9 @@ name: Nightly rustfmt + +permissions: + contents: write + pull-requests: write + on: schedule: - cron: "0 0 * * 0" # runs weekly on Sunday at 00:00 From dbfbf83e327cde37b246fd73e06b1af32a7a3175 Mon Sep 17 00:00:00 2001 From: Fmt Bot Date: Wed, 5 Nov 2025 14:27:24 +0000 Subject: [PATCH 142/184] 2025-11-05 automated rustfmt nightly --- benches/payments.rs | 4 ++-- src/lib.rs | 4 ++-- src/scoring.rs | 30 +++++++++++++----------------- src/types.rs | 3 +-- 4 files changed, 18 insertions(+), 23 deletions(-) diff --git a/benches/payments.rs b/benches/payments.rs index 75b7f0513..86dee39d8 100644 --- a/benches/payments.rs +++ b/benches/payments.rs @@ -1,8 +1,8 @@ #[path = "../tests/common/mod.rs"] mod common; -use std::time::Instant; -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; +use std::time::{Duration, Instant}; use bitcoin::hex::DisplayHex; use bitcoin::Amount; diff --git a/src/lib.rs b/src/lib.rs index 6a26c6c5b..fb0fb9f66 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -107,7 +107,6 @@ use std::net::ToSocketAddrs; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -use crate::scoring::setup_background_pathfinding_scores_sync; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; #[cfg(feature = "uniffi")] @@ -158,12 +157,13 @@ use types::{ pub use types::{ ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, }; - pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, vss_client, }; +use crate::scoring::setup_background_pathfinding_scores_sync; + 
#[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node"); diff --git a/src/scoring.rs b/src/scoring.rs index 107f63f65..e85abade3 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -1,22 +1,18 @@ -use std::{ - io::Cursor, - sync::{Arc, Mutex, RwLock}, - time::{Duration, SystemTime}, -}; +use std::io::Cursor; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, SystemTime}; -use crate::{ - config::{ - EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, - }, - io::utils::write_external_pathfinding_scores_to_cache, - logger::LdkLogger, - runtime::Runtime, - NodeMetrics, Scorer, -}; -use crate::{write_node_metrics, DynStore, Logger}; -use lightning::{ - log_error, log_info, log_trace, routing::scoring::ChannelLiquidities, util::ser::Readable, +use lightning::routing::scoring::ChannelLiquidities; +use lightning::util::ser::Readable; +use lightning::{log_error, log_info, log_trace}; + +use crate::config::{ + EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, }; +use crate::io::utils::write_external_pathfinding_scores_to_cache; +use crate::logger::LdkLogger; +use crate::runtime::Runtime; +use crate::{write_node_metrics, DynStore, Logger, NodeMetrics, Scorer}; /// Start a background task that periodically downloads scores via an external url and merges them into the local /// pathfinding scores. 
diff --git a/src/types.rs b/src/types.rs index 71512b2cd..b8dc10b18 100644 --- a/src/types.rs +++ b/src/types.rs @@ -18,8 +18,7 @@ use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::ln::types::ChannelId; use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; -use lightning::routing::scoring::CombinedScorer; -use lightning::routing::scoring::ProbabilisticScoringFeeParameters; +use lightning::routing::scoring::{CombinedScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersister}; use lightning::util::ser::{Readable, Writeable, Writer}; From f5822a02329eacdc4fa756f422c274dda25cc127 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 13 Nov 2025 13:45:46 +0100 Subject: [PATCH 143/184] Introduce `InMemoryStore` for testing Recently, `rust-lightning` broke the (async) API of the `TestStore`, making it ~impossible to use in regular tests. Here, we un-DRY our `TestStore` implementation and simply copy over the previous `TestStore` version, now named `InMemoryStore` to discern the objects. We also switch all feasible instances over to use `InMemoryStore` rather than LDK's `test_utils::TestStore`. 
--- src/data_store.rs | 5 +- src/event.rs | 7 +- src/io/test_utils.rs | 128 +++++++++++++++++- .../asynchronous/static_invoice_store.rs | 4 +- src/peer_store.rs | 5 +- 5 files changed, 138 insertions(+), 11 deletions(-) diff --git a/src/data_store.rs b/src/data_store.rs index 83cbf4476..87bd831c9 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -172,10 +172,11 @@ where #[cfg(test)] mod tests { use lightning::impl_writeable_tlv_based; - use lightning::util::test_utils::{TestLogger, TestStore}; + use lightning::util::test_utils::TestLogger; use super::*; use crate::hex_utils; + use crate::io::test_utils::InMemoryStore; #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] struct TestObjectId { @@ -234,7 +235,7 @@ mod tests { #[test] fn data_is_persisted() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let primary_namespace = "datastore_test_primary".to_string(); let secondary_namespace = "datastore_test_secondary".to_string(); diff --git a/src/event.rs b/src/event.rs index 1946350a3..42b60e213 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1605,13 +1605,14 @@ mod tests { use std::sync::atomic::{AtomicU16, Ordering}; use std::time::Duration; - use lightning::util::test_utils::{TestLogger, TestStore}; + use lightning::util::test_utils::TestLogger; use super::*; + use crate::io::test_utils::InMemoryStore; #[tokio::test] async fn event_queue_persistence() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger))); assert_eq!(event_queue.next_event(), None); @@ -1647,7 +1648,7 @@ mod tests { #[tokio::test] async fn event_queue_concurrency() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let 
event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger))); assert_eq!(event_queue.next_event(), None); diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index fd4de1c9f..310638dd8 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -5,8 +5,13 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::boxed::Box; +use std::collections::{hash_map, HashMap}; +use std::future::Future; use std::panic::RefUnwindSafe; use std::path::PathBuf; +use std::pin::Pin; +use std::sync::Mutex; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ @@ -14,10 +19,10 @@ use lightning::ln::functional_test_utils::{ create_network, create_node_cfgs, create_node_chanmgrs, send_payment, TestChanMonCfg, }; use lightning::util::persist::{ - KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, + KVStore, KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event}; +use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event, io}; use rand::distr::Alphanumeric; use rand::{rng, Rng}; @@ -32,6 +37,125 @@ type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister< const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5; +pub struct InMemoryStore { + persisted_bytes: Mutex>>>, +} + +impl InMemoryStore { + pub fn new() -> Self { + let persisted_bytes = Mutex::new(HashMap::new()); + Self { persisted_bytes } + } + + fn read_internal( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + let persisted_lock = self.persisted_bytes.lock().unwrap(); + let prefixed = format!("{primary_namespace}/{secondary_namespace}"); + + if let Some(outer_ref) = persisted_lock.get(&prefixed) { + if let Some(inner_ref) = outer_ref.get(key) { + let bytes = 
inner_ref.clone(); + Ok(bytes) + } else { + Err(io::Error::new(io::ErrorKind::NotFound, "Key not found")) + } + } else { + Err(io::Error::new(io::ErrorKind::NotFound, "Namespace not found")) + } + } + + fn write_internal( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + let mut persisted_lock = self.persisted_bytes.lock().unwrap(); + + let prefixed = format!("{primary_namespace}/{secondary_namespace}"); + let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new()); + outer_e.insert(key.to_string(), buf); + Ok(()) + } + + fn remove_internal( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + ) -> io::Result<()> { + let mut persisted_lock = self.persisted_bytes.lock().unwrap(); + + let prefixed = format!("{primary_namespace}/{secondary_namespace}"); + if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) { + outer_ref.remove(&key.to_string()); + } + + Ok(()) + } + + fn list_internal( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { + let mut persisted_lock = self.persisted_bytes.lock().unwrap(); + + let prefixed = format!("{primary_namespace}/{secondary_namespace}"); + match persisted_lock.entry(prefixed) { + hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()), + hash_map::Entry::Vacant(_) => Ok(Vec::new()), + } + } +} + +impl KVStore for InMemoryStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, io::Error>> + 'static + Send>> { + let res = self.read_internal(&primary_namespace, &secondary_namespace, &key); + Box::pin(async move { res }) + } + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + 'static + Send>> { + let res = self.write_internal(&primary_namespace, &secondary_namespace, &key, buf); + Box::pin(async move { res }) + } + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, 
key: &str, lazy: bool, + ) -> Pin> + 'static + Send>> { + let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy); + Box::pin(async move { res }) + } + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin, io::Error>> + 'static + Send>> { + let res = self.list_internal(primary_namespace, secondary_namespace); + Box::pin(async move { res }) + } +} + +impl KVStoreSync for InMemoryStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + self.read_internal(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + self.write_internal(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + self.remove_internal(primary_namespace, secondary_namespace, key, lazy) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + self.list_internal(primary_namespace, secondary_namespace) + } +} + +unsafe impl Sync for InMemoryStore {} +unsafe impl Send for InMemoryStore {} + pub(crate) fn random_storage_path() -> PathBuf { let mut temp_path = std::env::temp_dir(); let mut rng = rng(); diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index a7e2d2f9e..45125cfee 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -157,15 +157,15 @@ mod tests { use lightning::offers::offer::OfferBuilder; use lightning::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder}; use lightning::sign::EntropySource; - use lightning::util::test_utils::TestStore; use lightning_types::features::BlindedHopFeatures; + use crate::io::test_utils::InMemoryStore; use 
crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; use crate::types::DynStore; #[tokio::test] async fn static_invoice_store_test() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let static_invoice_store = StaticInvoiceStore::new(Arc::clone(&store)); let static_invoice = invoice(); diff --git a/src/peer_store.rs b/src/peer_store.rs index 82c80c396..59cd3d94f 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -152,13 +152,14 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use lightning::util::test_utils::{TestLogger, TestStore}; + use lightning::util::test_utils::TestLogger; use super::*; + use crate::io::test_utils::InMemoryStore; #[test] fn peer_info_persistence() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let peer_store = PeerStore::new(Arc::clone(&store), Arc::clone(&logger)); From 4c7254139dc2eb09424cefda7c8c357b71d69c14 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 10 Nov 2025 13:48:51 +0100 Subject: [PATCH 144/184] Make `EventQueue` persistence `async` Previously, we'd still use `KVStoreSync` for persistence of our event queue, which also meant calling the sync persistence through our otherwise-async background processor/event handling flow. Here we switch our `EventQueue` persistence to be async, which gets us one step further towards async-everything. 
--- src/event.rs | 63 ++++++++++++++++++++++++++++------------------------ src/lib.rs | 5 ++++- 2 files changed, 38 insertions(+), 30 deletions(-) diff --git a/src/event.rs b/src/event.rs index 42b60e213..3de2c3261 100644 --- a/src/event.rs +++ b/src/event.rs @@ -26,7 +26,7 @@ use lightning::util::config::{ ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate, }; use lightning::util::errors::APIError; -use lightning::util::persist::KVStoreSync; +use lightning::util::persist::KVStore; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use lightning_liquidity::lsps2::utils::compute_opening_fee; use lightning_types::payment::{PaymentHash, PaymentPreimage}; @@ -301,12 +301,14 @@ where Self { queue, waker, kv_store, logger } } - pub(crate) fn add_event(&self, event: Event) -> Result<(), Error> { - { + pub(crate) async fn add_event(&self, event: Event) -> Result<(), Error> { + let data = { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.push_back(event); - self.persist_queue(&locked_queue)?; - } + EventQueueSerWrapper(&locked_queue).encode() + }; + + self.persist_queue(data).await?; if let Some(waker) = self.waker.lock().unwrap().take() { waker.wake(); @@ -323,12 +325,14 @@ where EventFuture { event_queue: Arc::clone(&self.queue), waker: Arc::clone(&self.waker) }.await } - pub(crate) fn event_handled(&self) -> Result<(), Error> { - { + pub(crate) async fn event_handled(&self) -> Result<(), Error> { + let data = { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.pop_front(); - self.persist_queue(&locked_queue)?; - } + EventQueueSerWrapper(&locked_queue).encode() + }; + + self.persist_queue(data).await?; if let Some(waker) = self.waker.lock().unwrap().take() { waker.wake(); @@ -336,15 +340,15 @@ where Ok(()) } - fn persist_queue(&self, locked_queue: &VecDeque) -> Result<(), Error> { - let data = EventQueueSerWrapper(locked_queue).encode(); - KVStoreSync::write( + async fn persist_queue(&self, 
encoded_queue: Vec) -> Result<(), Error> { + KVStore::write( &*self.kv_store, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY, - data, + encoded_queue, ) + .await .map_err(|e| { log_error!( self.logger, @@ -694,7 +698,7 @@ where claim_deadline, custom_records, }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!( @@ -928,7 +932,7 @@ where .map(|cf| cf.custom_tlvs().into_iter().map(|tlv| tlv.into()).collect()) .unwrap_or_default(), }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -988,7 +992,7 @@ where fee_paid_msat, }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1019,7 +1023,7 @@ where let event = Event::PaymentFailed { payment_id: Some(payment_id), payment_hash, reason }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1295,7 +1299,7 @@ where claim_from_onchain_tx, outbound_amount_forwarded_msat, }; - self.event_queue.add_event(event).map_err(|e| { + self.event_queue.add_event(event).await.map_err(|e| { log_error!(self.logger, "Failed to push to event queue: {}", e); ReplayEvent() })?; @@ -1322,7 +1326,7 @@ where counterparty_node_id, funding_txo, }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1383,7 +1387,7 @@ where user_channel_id: UserChannelId(user_channel_id), counterparty_node_id: Some(counterparty_node_id), }; 
- match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1407,7 +1411,7 @@ where reason: Some(reason), }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1622,7 +1626,7 @@ mod tests { user_channel_id: UserChannelId(2323), counterparty_node_id: None, }; - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); // Check we get the expected event and that it is returned until we mark it handled. for _ in 0..5 { @@ -1631,18 +1635,19 @@ mod tests { } // Check we can read back what we persisted. - let persisted_bytes = KVStoreSync::read( + let persisted_bytes = KVStore::read( &*store, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY, ) + .await .unwrap(); let deser_event_queue = EventQueue::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap(); assert_eq!(deser_event_queue.next_event_async().await, expected_event); - event_queue.event_handled().unwrap(); + event_queue.event_handled().await.unwrap(); assert_eq!(event_queue.next_event(), None); } @@ -1676,28 +1681,28 @@ mod tests { let mut delayed_enqueue = false; for _ in 0..25 { - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); enqueued_events.fetch_add(1, Ordering::SeqCst); } loop { tokio::select! 
{ _ = tokio::time::sleep(Duration::from_millis(10)), if !delayed_enqueue => { - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); enqueued_events.fetch_add(1, Ordering::SeqCst); delayed_enqueue = true; } e = event_queue.next_event_async() => { assert_eq!(e, expected_event); - event_queue.event_handled().unwrap(); + event_queue.event_handled().await.unwrap(); received_events.fetch_add(1, Ordering::SeqCst); - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); enqueued_events.fetch_add(1, Ordering::SeqCst); } e = event_queue.next_event_async() => { assert_eq!(e, expected_event); - event_queue.event_handled().unwrap(); + event_queue.event_handled().await.unwrap(); received_events.fetch_add(1, Ordering::SeqCst); } } diff --git a/src/lib.rs b/src/lib.rs index 701a14dde..982673f4a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -777,7 +777,10 @@ impl Node { /// /// **Note:** This **MUST** be called after each event has been handled. pub fn event_handled(&self) -> Result<(), Error> { - self.event_queue.event_handled().map_err(|e| { + // We use our runtime for the sync variant to ensure `tokio::task::block_in_place` is + // always called if we'd ever hit this in an outer runtime context. + let fut = self.event_queue.event_handled(); + self.runtime.block_on(fut).map_err(|e| { log_error!( self.logger, "Couldn't mark event handled due to persistence failure: {}", From 7c352341f75ce032e90e5b92f20d5cc46fbfc208 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 13 Nov 2025 14:14:01 +0100 Subject: [PATCH 145/184] Allow to set optional `RouteParametersConfig` in BOLT12 API Previously, LDK only allowed to set this for BOLT11 payments. Since we now can, we allow to specify the `RouteParametersConfig` in BOLT12 and `UnifiedQrPayment` APIs. 
--- bindings/ldk_node.udl | 8 ++++---- src/lib.rs | 2 ++ src/payment/bolt12.rs | 35 +++++++++++++++++++++++---------- src/payment/unified_qr.rs | 12 ++++++++--- tests/integration_tests_rust.rs | 21 +++++++++++++------- 5 files changed, 54 insertions(+), 24 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index ab2f483a1..86727231d 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -201,9 +201,9 @@ interface Bolt11Payment { interface Bolt12Payment { [Throws=NodeError] - PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note); + PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note); + PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] Offer receive(u64 amount_msat, [ByRef]string description, u32? expiry_secs, u64? quantity); [Throws=NodeError] @@ -211,7 +211,7 @@ interface Bolt12Payment { [Throws=NodeError] Bolt12Invoice request_refund_payment([ByRef]Refund refund); [Throws=NodeError] - Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note); + Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] Offer receive_async(); [Throws=NodeError] @@ -256,7 +256,7 @@ interface UnifiedQrPayment { [Throws=NodeError] string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); [Throws=NodeError] - QrPaymentResult send([ByRef]string uri_str); + QrPaymentResult send([ByRef]string uri_str, RouteParametersConfig? 
route_parameters); }; interface LSPS1Liquidity { diff --git a/src/lib.rs b/src/lib.rs index 701a14dde..ff4f6ad55 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -854,6 +854,7 @@ impl Node { Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), self.async_payments_role, @@ -868,6 +869,7 @@ impl Node { Arc::new(Bolt12Payment::new( Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), Arc::clone(&self.is_running), Arc::clone(&self.logger), self.async_payments_role, diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 2b299739d..0dd38edca 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -23,7 +23,7 @@ use lightning::util::ser::{Readable, Writeable}; use lightning_types::string::UntrustedString; use rand::RngCore; -use crate::config::{AsyncPaymentsRole, LDK_PAYMENT_RETRY_TIMEOUT}; +use crate::config::{AsyncPaymentsRole, Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; use crate::ffi::{maybe_deref, maybe_wrap}; use crate::logger::{log_error, log_info, LdkLogger, Logger}; @@ -54,6 +54,7 @@ type Refund = Arc; pub struct Bolt12Payment { channel_manager: Arc, payment_store: Arc, + config: Arc, is_running: Arc>, logger: Arc, async_payments_role: Option, @@ -62,10 +63,10 @@ pub struct Bolt12Payment { impl Bolt12Payment { pub(crate) fn new( channel_manager: Arc, payment_store: Arc, - is_running: Arc>, logger: Arc, + config: Arc, is_running: Arc>, logger: Arc, async_payments_role: Option, ) -> Self { - Self { channel_manager, payment_store, is_running, logger, async_payments_role } + Self { channel_manager, payment_store, config, is_running, logger, async_payments_role } } /// Send a payment given an offer. @@ -74,8 +75,12 @@ impl Bolt12Payment { /// response. /// /// If `quantity` is `Some` it represents the number of items requested. 
+ /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. pub fn send( &self, offer: &Offer, quantity: Option, payer_note: Option, + route_parameters: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); @@ -87,7 +92,8 @@ impl Bolt12Payment { rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let route_params_config = RouteParametersConfig::default(); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -104,7 +110,7 @@ impl Bolt12Payment { let params = OptionalOfferPaymentParams { payer_note: payer_note.clone(), retry_strategy, - route_params_config, + route_params_config: route_parameters, }; let res = if let Some(quantity) = quantity { self.channel_manager @@ -181,8 +187,12 @@ impl Bolt12Payment { /// /// If `payer_note` is `Some` it will be seen by the recipient and reflected back in the invoice /// response. + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
pub fn send_using_amount( &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, + route_parameters: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); @@ -194,7 +204,8 @@ impl Bolt12Payment { rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let route_params_config = RouteParametersConfig::default(); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -215,7 +226,7 @@ impl Bolt12Payment { let params = OptionalOfferPaymentParams { payer_note: payer_note.clone(), retry_strategy, - route_params_config, + route_params_config: route_parameters, }; let res = if let Some(quantity) = quantity { self.channel_manager.pay_for_offer_with_quantity( @@ -402,10 +413,13 @@ impl Bolt12Payment { /// Returns a [`Refund`] object that can be used to offer a refund payment of the amount given. /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
+ /// /// [`Refund`]: lightning::offers::refund::Refund pub fn initiate_refund( &self, amount_msat: u64, expiry_secs: u32, quantity: Option, - payer_note: Option, + payer_note: Option, route_parameters: Option, ) -> Result { let mut random_bytes = [0u8; 32]; rand::rng().fill_bytes(&mut random_bytes); @@ -415,7 +429,8 @@ impl Bolt12Payment { .duration_since(UNIX_EPOCH) .unwrap(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let route_params_config = RouteParametersConfig::default(); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let mut refund_builder = self .channel_manager @@ -424,7 +439,7 @@ impl Bolt12Payment { absolute_expiry, payment_id, retry_strategy, - route_params_config, + route_parameters, ) .map_err(|e| { log_error!(self.logger, "Failed to create refund builder: {:?}", e); diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index fc2eca150..6ebf25563 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -20,6 +20,7 @@ use bitcoin::address::{NetworkChecked, NetworkUnchecked}; use bitcoin::{Amount, Txid}; use lightning::ln::channelmanager::PaymentId; use lightning::offers::offer::Offer; +use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; use crate::error::Error; @@ -137,8 +138,13 @@ impl UnifiedQrPayment { /// Returns a `QrPaymentResult` indicating the outcome of the payment. If an error /// occurs, an `Error` is returned detailing the issue encountered. /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
+ /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki - pub fn send(&self, uri_str: &str) -> Result { + pub fn send( + &self, uri_str: &str, route_parameters: Option, + ) -> Result { let uri: bip21::Uri = uri_str.parse().map_err(|_| Error::InvalidUri)?; @@ -147,7 +153,7 @@ impl UnifiedQrPayment { if let Some(offer) = uri_network_checked.extras.bolt12_offer { let offer = maybe_wrap(offer); - match self.bolt12_payment.send(&offer, None, None) { + match self.bolt12_payment.send(&offer, None, None, route_parameters) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), } @@ -155,7 +161,7 @@ impl UnifiedQrPayment { if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { let invoice = maybe_wrap(invoice); - match self.bolt11_invoice.send(&invoice, None) { + match self.bolt11_invoice.send(&invoice, route_parameters) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified QR code payment. 
Falling back to the on-chain transaction.", e), } diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index e2d4207cd..399fe0f58 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -967,7 +967,7 @@ async fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); let payment_id = node_a .bolt12_payment() - .send(&offer, expected_quantity, expected_payer_note.clone()) + .send(&offer, expected_quantity, expected_payer_note.clone(), None) .unwrap(); expect_payment_successful_event!(node_a, Some(payment_id), None); @@ -1023,7 +1023,7 @@ async fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); assert!(node_a .bolt12_payment() - .send_using_amount(&offer, less_than_offer_amount, None, None) + .send_using_amount(&offer, less_than_offer_amount, None, None, None) .is_err()); let payment_id = node_a .bolt12_payment() @@ -1032,6 +1032,7 @@ async fn simple_bolt12_send_receive() { expected_amount_msat, expected_quantity, expected_payer_note.clone(), + None, ) .unwrap(); @@ -1089,7 +1090,13 @@ async fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); let refund = node_b .bolt12_payment() - .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) + .initiate_refund( + overpaid_amount, + 3600, + expected_quantity, + expected_payer_note.clone(), + None, + ) .unwrap(); let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); expect_payment_received_event!(node_a, overpaid_amount); @@ -1275,7 +1282,7 @@ async fn async_payment() { node_receiver.stop().unwrap(); let payment_id = - node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None).unwrap(); + node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None, None).unwrap(); // Sleep to allow the payment reach a state where the htlc is held and waiting for the receiver to come online. 
tokio::time::sleep(std::time::Duration::from_millis(3000)).await; @@ -1473,7 +1480,7 @@ async fn unified_qr_send_receive() { let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); let uri_str = uqr_payment.clone().unwrap(); - let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { + let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str, None) { Ok(QrPaymentResult::Bolt12 { payment_id }) => { println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); payment_id @@ -1494,7 +1501,7 @@ async fn unified_qr_send_receive() { // Cut off the BOLT12 part to fallback to BOLT11. let uri_str_without_offer = uri_str.split("&lno=").next().unwrap(); let invoice_payment_id: PaymentId = - match node_a.unified_qr_payment().send(uri_str_without_offer) { + match node_a.unified_qr_payment().send(uri_str_without_offer, None) { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected Bolt11 payment but got Bolt12"); }, @@ -1517,7 +1524,7 @@ async fn unified_qr_send_receive() { // Cut off any lightning part to fallback to on-chain only. let uri_str_without_lightning = onchain_uqr_payment.split("&lightning=").next().unwrap(); - let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning) { + let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning, None) { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected on-chain payment but got Bolt12") }, From 996b58e62b7ca618649ea09f6a6b637e3c583008 Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Thu, 13 Nov 2025 17:29:39 +0100 Subject: [PATCH 146/184] Add explicit type definition for ChannelDetails This change uses an alias (LdkChannelDetails) and an explicit Vec type annotation for 'open_channels' in close_channel_internal and update_channel_config. 
This resolves type ambiguity caused by a name collision with the local ChannelDetails struct, which prevents rust-analyzer from correctly inferring the type as Vec, leading to an incorrect 'len() is private' error. --- src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 701a14dde..d1090bd89 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -133,7 +133,7 @@ use io::utils::write_node_metrics; use lightning::chain::BestBlock; use lightning::events::bump_transaction::Wallet as LdkWallet; use lightning::impl_writeable_tlv_based; -use lightning::ln::channel_state::ChannelShutdownState; +use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; @@ -1289,7 +1289,7 @@ impl Node { force_close_reason.is_none() || force, "Reason can only be set for force closures" ); - let open_channels = + let open_channels: Vec = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) @@ -1328,7 +1328,7 @@ impl Node { &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, channel_config: ChannelConfig, ) -> Result<(), Error> { - let open_channels = + let open_channels: Vec = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) From ac948f6dbe019a2565572025c668ce0c1f4ce846 Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Thu, 13 Nov 2025 17:52:52 +0100 Subject: [PATCH 147/184] Replace deprecated thread_rng with rng --- benches/payments.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benches/payments.rs b/benches/payments.rs index 86dee39d8..ba69e046d 100644 --- a/benches/payments.rs +++ 
b/benches/payments.rs @@ -20,7 +20,7 @@ use crate::common::open_channel_push_amt; fn spawn_payment(node_a: Arc, node_b: Arc, amount_msat: u64) { let mut preimage_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut preimage_bytes); + rand::rng().fill_bytes(&mut preimage_bytes); let preimage = PaymentPreimage(preimage_bytes); let payment_hash: PaymentHash = preimage.into(); @@ -101,7 +101,7 @@ async fn send_payments(node_a: Arc, node_b: Arc) -> std::time::Durat // Send back the money for the next iteration. let mut preimage_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut preimage_bytes); + rand::rng().fill_bytes(&mut preimage_bytes); node_b .spontaneous_payment() .send_with_preimage( From 20ffc9cdc1306d5c34c5602e5a2954ed38b520c4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 31 Oct 2025 11:49:42 +0100 Subject: [PATCH 148/184] Implement `lazy` deletes for `VssStore` We implement `lazy` deletion for `VssStore` by tracking pending lazy deletes and supplying them as `delete_items` on the next `put` operation. 
--- src/io/vss_store.rs | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 0e7d0872a..cea1e5864 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -15,6 +15,7 @@ use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; +use bdk_chain::Merge; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::{KVStore, KVStoreSync}; @@ -181,7 +182,7 @@ impl KVStoreSync for VssStore { } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); @@ -203,6 +204,7 @@ impl KVStoreSync for VssStore { primary_namespace, secondary_namespace, key, + lazy, ) .await }; @@ -275,7 +277,7 @@ impl KVStore for VssStore { }) } fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Pin> + Send>> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -292,6 +294,7 @@ impl KVStore for VssStore { primary_namespace, secondary_namespace, key, + lazy, ) .await }) @@ -321,6 +324,7 @@ struct VssStoreInner { // Per-key locks that ensures that we don't have concurrent writes to the same namespace/key. // The lock also encapsulates the latest written version per key. 
locks: Mutex>>>, + pending_lazy_deletes: Mutex>, } impl VssStoreInner { @@ -347,7 +351,8 @@ impl VssStoreInner { let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); let locks = Mutex::new(HashMap::new()); - Self { client, store_id, storable_builder, key_obfuscator, locks } + let pending_lazy_deletes = Mutex::new(Vec::new()); + Self { client, store_id, storable_builder, key_obfuscator, locks, pending_lazy_deletes } } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { @@ -451,6 +456,12 @@ impl VssStoreInner { "write", )?; + let delete_items = self + .pending_lazy_deletes + .try_lock() + .ok() + .and_then(|mut guard| guard.take()) + .unwrap_or_default(); self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { let obfuscated_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); @@ -464,7 +475,7 @@ impl VssStoreInner { version: vss_version, value: storable.encode_to_vec(), }], - delete_items: vec![], + delete_items, }; self.client.put_object(&request).await.map_err(|e| { @@ -482,7 +493,7 @@ impl VssStoreInner { async fn remove_internal( &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: String, secondary_namespace: String, key: String, + primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, ) -> io::Result<()> { check_namespace_key_validity( &primary_namespace, @@ -491,13 +502,19 @@ impl VssStoreInner { "remove", )?; + let obfuscated_key = + self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + + let key_value = KeyValue { key: obfuscated_key, version: -1, value: vec![] }; + if lazy { + let mut pending_lazy_deletes = self.pending_lazy_deletes.lock().unwrap(); + pending_lazy_deletes.push(key_value); + return Ok(()); + } + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { - let obfuscated_key = - self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); 
- let request = DeleteObjectRequest { - store_id: self.store_id.clone(), - key_value: Some(KeyValue { key: obfuscated_key, version: -1, value: vec![] }), - }; + let request = + DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(key_value) }; self.client.delete_object(&request).await.map_err(|e| { let msg = format!( From db246fc035276ba5d5810e28568114ce9a927efb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 17 Nov 2025 10:11:26 +0100 Subject: [PATCH 149/184] f Restore delete_items on failed write --- src/io/vss_store.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index cea1e5864..f05e16669 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -475,10 +475,15 @@ impl VssStoreInner { version: vss_version, value: storable.encode_to_vec(), }], - delete_items, + delete_items: delete_items.clone(), }; self.client.put_object(&request).await.map_err(|e| { + // Restore delete items so they'll be retried on next write. + if !delete_items.is_empty() { + self.pending_lazy_deletes.lock().unwrap().extend(delete_items); + } + let msg = format!( "Failed to write to key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e From c2b6b18d2978de35062fb1b521e60226242f1a44 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 31 Oct 2025 12:03:22 +0100 Subject: [PATCH 150/184] Add test for `lazy` deletion behavior We add a testcase that ensures we only delete a lazily-deleted key after the next write operation succeeds. 
Co-authored by Claude AI --- src/io/vss_store.rs | 83 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index f05e16669..49f038997 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -666,4 +666,87 @@ mod tests { do_read_write_remove_list_persist(&vss_store); drop(vss_store) } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn vss_lazy_delete() { + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + let mut rng = rng(); + let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); + let mut vss_seed = [0u8; 32]; + rng.fill_bytes(&mut vss_seed); + let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let logger = Arc::new(Logger::new_log_facade()); + let runtime = Arc::new(Runtime::new(logger).unwrap()); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); + + let primary_namespace = "test_namespace"; + let secondary_namespace = ""; + let key_to_delete = "key_to_delete"; + let key_for_trigger = "key_for_trigger"; + let data_to_delete = b"data_to_delete".to_vec(); + let trigger_data = b"trigger_data".to_vec(); + + // Write the key that we'll later lazily delete + KVStore::write( + &vss_store, + primary_namespace, + secondary_namespace, + key_to_delete, + data_to_delete.clone(), + ) + .await + .unwrap(); + + // Verify the key exists + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) + .await + .unwrap(); + assert_eq!(read_data, data_to_delete); + + // Perform a lazy delete + KVStore::remove(&vss_store, primary_namespace, secondary_namespace, key_to_delete, true) + .await + .unwrap(); + + // Verify the key still exists (lazy delete doesn't immediately remove it) + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) + .await + .unwrap(); + 
assert_eq!(read_data, data_to_delete); + + // Verify the key is still in the list + let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); + assert!(keys.contains(&key_to_delete.to_string())); + + // Trigger the actual deletion by performing a write operation + KVStore::write( + &vss_store, + primary_namespace, + secondary_namespace, + key_for_trigger, + trigger_data.clone(), + ) + .await + .unwrap(); + + // Now verify the key is actually deleted + let read_result = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete).await; + assert!(read_result.is_err()); + assert_eq!(read_result.unwrap_err().kind(), ErrorKind::NotFound); + + // Verify the key is no longer in the list + let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); + assert!(!keys.contains(&key_to_delete.to_string())); + + // Verify the trigger key still exists + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_for_trigger) + .await + .unwrap(); + assert_eq!(read_data, trigger_data); + } } From 3d5013b5f5ba4d68ef3b8e1fd8c6f2da82984dbc Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 11 Nov 2025 12:54:04 +0100 Subject: [PATCH 151/184] Introduce schema versioning We introduce an `enum VssSchemaVersion` that will allow us to discern different behaviors based on the schema version based on a backwards compatible manner. --- src/io/vss_store.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 49f038997..d6f157ae4 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -45,6 +45,13 @@ type CustomRetryPolicy = FilteredRetryPolicy< Box bool + 'static + Send + Sync>, >; +enum VssSchemaVersion { + // The initial schema version. + // This used an empty `aad` and unobfuscated `primary_namespace`/`secondary_namespace`s in the + // stored key. 
+ V0, +} + // We set this to a small number of threads that would still allow to make some progress if one // would hit a blocking case const INTERNAL_RUNTIME_WORKERS: usize = 2; From aa25497fb7df4338df56f7783ae2a2964c7ff807 Mon Sep 17 00:00:00 2001 From: Martin Saposnic Date: Wed, 10 Sep 2025 14:46:04 -0300 Subject: [PATCH 152/184] Support client_trusts_lsp=true on ldk-node Implement changes introduced on https://github.com/lightningdevkit/rust-lightning/pull/3838 as discussed, client_trusts_lsp is a flag set at startup. --- bindings/ldk_node.udl | 1 + src/event.rs | 44 ++++- src/liquidity.rs | 73 +++++++- tests/integration_tests_rust.rs | 307 +++++++++++++++++++++++++++++++- 4 files changed, 413 insertions(+), 12 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 86727231d..077a20433 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -44,6 +44,7 @@ dictionary LSPS2ServiceConfig { u32 max_client_to_self_delay; u64 min_payment_size_msat; u64 max_payment_size_msat; + boolean client_trusts_lsp; }; enum LogLevel { diff --git a/src/event.rs b/src/event.rs index 3de2c3261..46488549c 100644 --- a/src/event.rs +++ b/src/event.rs @@ -491,7 +491,7 @@ where counterparty_node_id, channel_value_satoshis, output_script, - .. + user_channel_id, } => { // Construct the raw transaction with the output that is paid the amount of the // channel. @@ -510,12 +510,36 @@ where locktime, ) { Ok(final_tx) => { - // Give the funding transaction back to LDK for opening the channel. 
- match self.channel_manager.funding_transaction_generated( - temporary_channel_id, - counterparty_node_id, - final_tx, - ) { + let needs_manual_broadcast = + self.liquidity_source.as_ref().map_or(false, |ls| { + ls.as_ref().lsps2_channel_needs_manual_broadcast( + counterparty_node_id, + user_channel_id, + ) + }); + + let result = if needs_manual_broadcast { + self.liquidity_source.as_ref().map(|ls| { + ls.lsps2_store_funding_transaction( + user_channel_id, + counterparty_node_id, + final_tx.clone(), + ); + }); + self.channel_manager.funding_transaction_generated_manual_broadcast( + temporary_channel_id, + counterparty_node_id, + final_tx, + ) + } else { + self.channel_manager.funding_transaction_generated( + temporary_channel_id, + counterparty_node_id, + final_tx, + ) + }; + + match result { Ok(()) => {}, Err(APIError::APIMisuseError { err }) => { log_error!(self.logger, "Panicking due to APIMisuseError: {}", err); @@ -554,8 +578,10 @@ where }, } }, - LdkEvent::FundingTxBroadcastSafe { .. } => { - debug_assert!(false, "We currently only support safe funding, so this event should never be emitted."); + LdkEvent::FundingTxBroadcastSafe { user_channel_id, counterparty_node_id, .. 
} => { + self.liquidity_source.as_ref().map(|ls| { + ls.lsps2_funding_tx_broadcast_safe(user_channel_id, counterparty_node_id); + }); }, LdkEvent::PaymentClaimable { payment_hash, diff --git a/src/liquidity.rs b/src/liquidity.rs index 57e2ad488..ee520e14d 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -14,6 +14,7 @@ use std::time::Duration; use bitcoin::hashes::{sha256, Hash}; use bitcoin::secp256k1::{PublicKey, Secp256k1}; +use bitcoin::Transaction; use chrono::Utc; use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{InterceptId, MIN_FINAL_CLTV_EXPIRY_DELTA}; @@ -51,7 +52,6 @@ use crate::{total_anchor_channels_reserve_sats, Config, Error}; const LIQUIDITY_REQUEST_TIMEOUT_SECS: u64 = 5; const LSPS2_GETINFO_REQUEST_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24); -const LSPS2_CLIENT_TRUSTS_LSP_MODE: bool = true; const LSPS2_CHANNEL_CLTV_EXPIRY_DELTA: u32 = 72; struct LSPS1Client { @@ -130,6 +130,8 @@ pub struct LSPS2ServiceConfig { pub min_payment_size_msat: u64, /// The maximum payment size that we will accept when opening a channel. 
pub max_payment_size_msat: u64, + /// Use the client trusts lsp model + pub client_trusts_lsp: bool, } pub(crate) struct LiquiditySourceBuilder @@ -305,6 +307,73 @@ where self.lsps2_client.as_ref().map(|s| (s.lsp_node_id, s.lsp_address.clone())) } + pub(crate) fn lsps2_channel_needs_manual_broadcast( + &self, counterparty_node_id: PublicKey, user_channel_id: u128, + ) -> bool { + self.lsps2_service.as_ref().map_or(false, |lsps2_service| { + lsps2_service.service_config.client_trusts_lsp + && self + .liquidity_manager() + .lsps2_service_handler() + .and_then(|handler| { + handler + .channel_needs_manual_broadcast(user_channel_id, &counterparty_node_id) + .ok() + }) + .unwrap_or(false) + }) + } + + pub(crate) fn lsps2_store_funding_transaction( + &self, user_channel_id: u128, counterparty_node_id: PublicKey, funding_tx: Transaction, + ) { + if self.lsps2_service.as_ref().map_or(false, |svc| !svc.service_config.client_trusts_lsp) { + // Only necessary for client-trusts-LSP flow + return; + } + + let lsps2_service_handler = self.liquidity_manager.lsps2_service_handler(); + if let Some(handler) = lsps2_service_handler { + handler + .store_funding_transaction(user_channel_id, &counterparty_node_id, funding_tx) + .unwrap_or_else(|e| { + debug_assert!(false, "Failed to store funding transaction: {:?}", e); + log_error!(self.logger, "Failed to store funding transaction: {:?}", e); + }); + } else { + log_error!(self.logger, "LSPS2 service handler is not available."); + } + } + + pub(crate) fn lsps2_funding_tx_broadcast_safe( + &self, user_channel_id: u128, counterparty_node_id: PublicKey, + ) { + if self.lsps2_service.as_ref().map_or(false, |svc| !svc.service_config.client_trusts_lsp) { + // Only necessary for client-trusts-LSP flow + return; + } + + let lsps2_service_handler = self.liquidity_manager.lsps2_service_handler(); + if let Some(handler) = lsps2_service_handler { + handler + .set_funding_tx_broadcast_safe(user_channel_id, &counterparty_node_id) + 
.unwrap_or_else(|e| { + debug_assert!( + false, + "Failed to mark funding transaction safe to broadcast: {:?}", + e + ); + log_error!( + self.logger, + "Failed to mark funding transaction safe to broadcast: {:?}", + e + ); + }); + } else { + log_error!(self.logger, "LSPS2 service handler is not available."); + } + } + pub(crate) async fn handle_next_event(&self) { match self.liquidity_manager.next_event_async().await { LiquidityEvent::LSPS1Client(LSPS1ClientEvent::SupportedOptionsReady { @@ -594,7 +663,7 @@ where request_id, intercept_scid, LSPS2_CHANNEL_CLTV_EXPIRY_DELTA, - LSPS2_CLIENT_TRUSTS_LSP_MODE, + service_config.client_trusts_lsp, user_channel_id, ) .await diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 399fe0f58..69df12710 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -1552,8 +1552,12 @@ async fn unified_qr_send_receive() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn lsps2_client_service_integration() { - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + do_lsps2_client_service_integration(true).await; + do_lsps2_client_service_integration(false).await; +} +async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); let sync_config = EsploraSyncConfig { background_sync_config: None }; @@ -1571,6 +1575,7 @@ async fn lsps2_client_service_integration() { min_channel_lifetime: 100, min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, + client_trusts_lsp, }; let service_config = random_config(true); @@ -1867,3 +1872,303 @@ async fn drop_in_async_context() { let node = setup_node(&chain_source, config, Some(seed_bytes)); node.stop().unwrap(); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn lsps2_client_trusts_lsp() { + let (bitcoind, electrsd) = 
setup_bitcoind_and_electrsd(); + + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + + let sync_config = EsploraSyncConfig { background_sync_config: None }; + + // Setup three nodes: service, client, and payer + let channel_opening_fee_ppm = 10_000; + let channel_over_provisioning_ppm = 100_000; + let lsps2_service_config = LSPS2ServiceConfig { + require_token: None, + advertise_service: false, + channel_opening_fee_ppm, + channel_over_provisioning_ppm, + max_payment_size_msat: 1_000_000_000, + min_payment_size_msat: 0, + min_channel_lifetime: 100, + min_channel_opening_fee_msat: 0, + max_client_to_self_delay: 1024, + client_trusts_lsp: true, + }; + + let service_config = random_config(true); + setup_builder!(service_builder, service_config.node_config); + service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + service_builder.set_liquidity_provider_lsps2(lsps2_service_config); + let service_node = service_builder.build().unwrap(); + service_node.start().unwrap(); + let service_node_id = service_node.node_id(); + let service_addr = service_node.listening_addresses().unwrap().first().unwrap().clone(); + + let client_config = random_config(true); + setup_builder!(client_builder, client_config.node_config); + client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None); + let client_node = client_builder.build().unwrap(); + client_node.start().unwrap(); + let client_node_id = client_node.node_id(); + + let payer_config = random_config(true); + setup_builder!(payer_builder, payer_config.node_config); + payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + let payer_node = payer_builder.build().unwrap(); + payer_node.start().unwrap(); + + let service_addr_onchain = service_node.onchain_payment().new_address().unwrap(); + let client_addr_onchain = 
client_node.onchain_payment().new_address().unwrap(); + let payer_addr_onchain = payer_node.onchain_payment().new_address().unwrap(); + + let premine_amount_sat = 10_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![service_addr_onchain, client_addr_onchain, payer_addr_onchain], + Amount::from_sat(premine_amount_sat), + ) + .await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + println!("Premine complete!"); + // Open a channel payer -> service that will allow paying the JIT invoice + open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await; + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + expect_channel_ready_event!(payer_node, service_node.node_id()); + expect_channel_ready_event!(service_node, payer_node.node_id()); + + let invoice_description = + Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()); + let jit_amount_msat = 100_000_000; + + println!("Generating JIT invoice!"); + let manual_preimage = PaymentPreimage([42u8; 32]); + let manual_payment_hash: PaymentHash = manual_preimage.into(); + let res = client_node + .bolt11_payment() + .receive_via_jit_channel_for_hash( + jit_amount_msat, + &invoice_description.into(), + 1024, + None, + manual_payment_hash, + ) + .unwrap(); + + // Have the payer_node pay the invoice, therby triggering channel open service_node -> client_node. 
+ println!("Paying JIT invoice!"); + let payment_id = payer_node.bolt11_payment().send(&res, None).unwrap(); + println!("Payment ID: {:?}", payment_id); + let funding_txo = expect_channel_pending_event!(service_node, client_node.node_id()); + expect_channel_ready_event!(service_node, client_node.node_id()); + expect_channel_pending_event!(client_node, service_node.node_id()); + expect_channel_ready_event!(client_node, service_node.node_id()); + + // Check the funding transaction hasn't been broadcasted yet and nodes aren't seeing it. + println!("Try to find funding tx... It won't be found yet, as the client has not claimed it."); + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + let mempool = bitcoind.client.get_raw_mempool().unwrap().into_model().unwrap(); + let funding_tx_found = mempool.0.iter().any(|txid| *txid == funding_txo.txid); + assert!(!funding_tx_found, "Funding transaction should NOT be broadcast yet"); + + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + assert_eq!( + client_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == service_node_id) + .unwrap() + .confirmations, + Some(0) + ); + assert_eq!( + service_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == client_node_id) + .unwrap() + .confirmations, + Some(0) + ); + + // Now claim the JIT payment, which should release the funding transaction + let service_fee_msat = (jit_amount_msat * channel_opening_fee_ppm as u64) / 1_000_000; + let expected_received_amount_msat = jit_amount_msat - service_fee_msat; + + let _ = expect_payment_claimable_event!( + client_node, + payment_id, + manual_payment_hash, + expected_received_amount_msat + ); + + client_node + .bolt11_payment() + .claim_for_hash(manual_payment_hash, jit_amount_msat, manual_preimage) + .unwrap(); + + expect_payment_successful_event!(payer_node, Some(payment_id), None); + + let _ = expect_payment_received_event!(client_node, 
expected_received_amount_msat).unwrap(); + + // Check the nodes pick up on the confirmed funding tx now. + wait_for_tx(&electrsd.client, funding_txo.txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + assert_eq!( + client_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == service_node_id) + .unwrap() + .confirmations, + Some(6) + ); + assert_eq!( + service_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == client_node_id) + .unwrap() + .confirmations, + Some(6) + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + + let sync_config = EsploraSyncConfig { background_sync_config: None }; + + // Setup three nodes: service, client, and payer + let channel_opening_fee_ppm = 10_000; + let channel_over_provisioning_ppm = 100_000; + let lsps2_service_config = LSPS2ServiceConfig { + require_token: None, + advertise_service: false, + channel_opening_fee_ppm, + channel_over_provisioning_ppm, + max_payment_size_msat: 1_000_000_000, + min_payment_size_msat: 0, + min_channel_lifetime: 100, + min_channel_opening_fee_msat: 0, + max_client_to_self_delay: 1024, + client_trusts_lsp: false, + }; + + let service_config = random_config(true); + setup_builder!(service_builder, service_config.node_config); + service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + service_builder.set_liquidity_provider_lsps2(lsps2_service_config); + let service_node = service_builder.build().unwrap(); + service_node.start().unwrap(); + + let service_node_id = service_node.node_id(); + let service_addr = service_node.listening_addresses().unwrap().first().unwrap().clone(); + + let client_config = 
random_config(true); + setup_builder!(client_builder, client_config.node_config); + client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None); + let client_node = client_builder.build().unwrap(); + client_node.start().unwrap(); + + let client_node_id = client_node.node_id(); + + let payer_config = random_config(true); + setup_builder!(payer_builder, payer_config.node_config); + payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + let payer_node = payer_builder.build().unwrap(); + payer_node.start().unwrap(); + + let service_addr_onchain = service_node.onchain_payment().new_address().unwrap(); + let client_addr_onchain = client_node.onchain_payment().new_address().unwrap(); + let payer_addr_onchain = payer_node.onchain_payment().new_address().unwrap(); + + let premine_amount_sat = 10_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![service_addr_onchain, client_addr_onchain, payer_addr_onchain], + Amount::from_sat(premine_amount_sat), + ) + .await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + println!("Premine complete!"); + // Open a channel payer -> service that will allow paying the JIT invoice + open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await; + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + payer_node.sync_wallets().unwrap(); + expect_channel_ready_event!(payer_node, service_node.node_id()); + expect_channel_ready_event!(service_node, payer_node.node_id()); + + let invoice_description = + Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()); + let jit_amount_msat = 100_000_000; + + println!("Generating JIT invoice!"); + let manual_preimage = PaymentPreimage([42u8; 32]); + let 
manual_payment_hash: PaymentHash = manual_preimage.into(); + let res = client_node + .bolt11_payment() + .receive_via_jit_channel_for_hash( + jit_amount_msat, + &invoice_description.into(), + 1024, + None, + manual_payment_hash, + ) + .unwrap(); + + // Have the payer_node pay the invoice, therby triggering channel open service_node -> client_node. + println!("Paying JIT invoice!"); + let _payment_id = payer_node.bolt11_payment().send(&res, None).unwrap(); + let funding_txo = expect_channel_pending_event!(service_node, client_node.node_id()); + expect_channel_ready_event!(service_node, client_node.node_id()); + expect_channel_pending_event!(client_node, service_node.node_id()); + expect_channel_ready_event!(client_node, service_node.node_id()); + println!("Waiting for funding transaction to be broadcast..."); + + // Check the nodes pick up on the confirmed funding tx now. + wait_for_tx(&electrsd.client, funding_txo.txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + service_node.sync_wallets().unwrap(); + client_node.sync_wallets().unwrap(); + assert_eq!( + client_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == service_node_id) + .unwrap() + .confirmations, + Some(6) + ); + assert_eq!( + service_node + .list_channels() + .iter() + .find(|c| c.counterparty_node_id == client_node_id) + .unwrap() + .confirmations, + Some(6) + ); +} From 039aad4b0644d35b388b88c0641eb0f72a526d16 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 17 Nov 2025 15:48:45 +0100 Subject: [PATCH 153/184] Avoid explicit `panic`s in `handle_event` Previously, we'd explicitly `panic` on an APIMisuseError. While this error type should still never happen, we avoid explicit panics in favor of `debug_assert`s here. 
--- src/event.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/event.rs b/src/event.rs index 46488549c..a55ddb7fd 100644 --- a/src/event.rs +++ b/src/event.rs @@ -542,8 +542,12 @@ where match result { Ok(()) => {}, Err(APIError::APIMisuseError { err }) => { - log_error!(self.logger, "Panicking due to APIMisuseError: {}", err); - panic!("APIMisuseError: {}", err); + log_error!( + self.logger, + "Encountered APIMisuseError, this should never happen: {}", + err + ); + debug_assert!(false, "APIMisuseError: {}", err); }, Err(APIError::ChannelUnavailable { err }) => { log_error!( @@ -571,7 +575,7 @@ where ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to force close channel after funding generation failed: {:?}", e); - panic!( + debug_assert!(false, "Failed to force close channel after funding generation failed" ); }); From 65f6f7b8e0617a0ff686cc3bfa2225a2e9d09d93 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 22 Aug 2025 11:06:53 +0200 Subject: [PATCH 154/184] Use obf. key as `aad` for `StorableBuilder` and obfuscate namespaces We bump our `vss-client` dependency to include the changes to the `StorableBuilder` interface. Previously, we the `vss-client` didn't allow to set `ChaCha20Poly1305RFC`'s `aad` field, which had the `tag` not commit to any particular key. This would allow a malicious VSS provider to substitute blobs stored under a different key without the client noticing. Here, we now set the `aad` field to the key under which the `Storable` will be stored, ensuring that the retrieved data was originally stored under the key we expected, if `VssSchemaVersion::V1` is set. We also now obfuscate primary and secondary namespaces in the persisted keys, if `VssSchemaVersion::V1` is set. We also account for `StorableBuilder` now taking `data_decryption_key` by reference on `build`/`deconstruct`. 
--- Cargo.toml | 5 ++- src/io/vss_store.rs | 105 ++++++++++++++++++++++++++++++++------------ 2 files changed, 80 insertions(+), 30 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 544dfca08..8e2f04447 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,7 +65,7 @@ serde = { version = "1.0.210", default-features = false, features = ["std", "der serde_json = { version = "1.0.128", default-features = false, features = ["std"] } log = { version = "0.4.22", default-features = false, features = ["std"]} -vss-client = "0.3" +vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} [target.'cfg(windows)'.dependencies] @@ -151,3 +151,6 @@ harness = false #lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } + +#vss-client-ng = { path = "../vss-client" } +#vss-client-ng = { git = "https://github.com/lightningdevkit/vss-client", branch = "main" } diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index d6f157ae4..6da3a53db 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -45,11 +45,15 @@ type CustomRetryPolicy = FilteredRetryPolicy< Box bool + 'static + Send + Sync>, >; +#[derive(Debug, PartialEq)] enum VssSchemaVersion { // The initial schema version. // This used an empty `aad` and unobfuscated `primary_namespace`/`secondary_namespace`s in the // stored key. V0, + // The second deployed schema version. 
+ // Here we started to obfuscate the primary and secondary namespaces and the obfuscated `store_key` (`obfuscate(primary_namespace#secondary_namespace)#obfuscate(key)`) is now used as `aad` for encryption, ensuring that the encrypted blobs commit to the key they're stored under. + V1, } // We set this to a small number of threads that would still allow to make some progress if one @@ -324,9 +328,10 @@ impl Drop for VssStore { } struct VssStoreInner { + schema_version: VssSchemaVersion, client: VssClient, store_id: String, - storable_builder: StorableBuilder, + data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, // Per-key locks that ensures that we don't have concurrent writes to the same namespace/key. // The lock also encapsulates the latest written version per key. @@ -339,10 +344,10 @@ impl VssStoreInner { base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, ) -> Self { + let schema_version = VssSchemaVersion::V0; let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); - let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource); let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) .with_max_attempts(10) .with_max_total_delay(Duration::from_secs(15)) @@ -359,7 +364,15 @@ impl VssStoreInner { let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); let locks = Mutex::new(HashMap::new()); let pending_lazy_deletes = Mutex::new(Vec::new()); - Self { client, store_id, storable_builder, key_obfuscator, locks, pending_lazy_deletes } + Self { + schema_version, + client, + store_id, + data_encryption_key, + key_obfuscator, + locks, + pending_lazy_deletes, + } } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { @@ -370,17 +383,45 @@ impl VssStoreInner { fn build_obfuscated_key( &self, primary_namespace: &str, secondary_namespace: 
&str, key: &str, ) -> String { - let obfuscated_key = self.key_obfuscator.obfuscate(key); - if primary_namespace.is_empty() { - obfuscated_key + if self.schema_version == VssSchemaVersion::V1 { + let obfuscated_prefix = + self.build_obfuscated_prefix(primary_namespace, secondary_namespace); + let obfuscated_key = self.key_obfuscator.obfuscate(key); + format!("{}#{}", obfuscated_prefix, obfuscated_key) + } else { + // Default to V0 schema + let obfuscated_key = self.key_obfuscator.obfuscate(key); + if primary_namespace.is_empty() { + obfuscated_key + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key) + } + } + } + + fn build_obfuscated_prefix( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> String { + if self.schema_version == VssSchemaVersion::V1 { + let prefix = format!("{}#{}", primary_namespace, secondary_namespace); + self.key_obfuscator.obfuscate(&prefix) } else { - format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key) + // Default to V0 schema + format!("{}#{}", primary_namespace, secondary_namespace) } } fn extract_key(&self, unified_key: &str) -> io::Result { - let mut parts = unified_key.splitn(3, '#'); - let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); + let mut parts = if self.schema_version == VssSchemaVersion::V1 { + let mut parts = unified_key.splitn(2, '#'); + let _obfuscated_namespace = parts.next(); + parts + } else { + // Default to V0 schema + let mut parts = unified_key.splitn(3, '#'); + let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); + parts + }; match parts.next() { Some(obfuscated_key) => { let actual_key = self.key_obfuscator.deobfuscate(obfuscated_key)?; @@ -395,7 +436,7 @@ impl VssStoreInner { ) -> io::Result> { let mut page_token = None; let mut keys = vec![]; - let key_prefix = format!("{}#{}", primary_namespace, secondary_namespace); + let key_prefix = self.build_obfuscated_prefix(primary_namespace, 
secondary_namespace); while page_token != Some("".to_string()) { let request = ListKeyVersionsRequest { store_id: self.store_id.clone(), @@ -425,9 +466,8 @@ impl VssStoreInner { ) -> io::Result> { check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; - let obfuscated_key = - self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); - let request = GetObjectRequest { store_id: self.store_id.clone(), key: obfuscated_key }; + let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + let request = GetObjectRequest { store_id: self.store_id.clone(), key: store_key.clone() }; let resp = self.client.get_object(&request).await.map_err(|e| { let msg = format!( "Failed to read from key {}/{}/{}: {}", @@ -449,7 +489,11 @@ impl VssStoreInner { Error::new(ErrorKind::Other, msg) })?; - Ok(self.storable_builder.deconstruct(storable)?.0) + let storable_builder = StorableBuilder::new(RandEntropySource); + let aad = + if self.schema_version == VssSchemaVersion::V1 { store_key.as_bytes() } else { &[] }; + let decrypted = storable_builder.deconstruct(storable, &self.data_encryption_key, aad)?.0; + Ok(decrypted) } async fn write_internal( @@ -469,22 +513,25 @@ impl VssStoreInner { .ok() .and_then(|mut guard| guard.take()) .unwrap_or_default(); - self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { - let obfuscated_key = - self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); - let vss_version = -1; - let storable = self.storable_builder.build(buf, vss_version); - let request = PutObjectRequest { - store_id: self.store_id.clone(), - global_version: None, - transaction_items: vec![KeyValue { - key: obfuscated_key, - version: vss_version, - value: storable.encode_to_vec(), - }], - delete_items: delete_items.clone(), - }; + let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + let vss_version = -1; + let 
storable_builder = StorableBuilder::new(RandEntropySource); + let aad = + if self.schema_version == VssSchemaVersion::V1 { store_key.as_bytes() } else { &[] }; + let storable = + storable_builder.build(buf.to_vec(), vss_version, &self.data_encryption_key, aad); + let request = PutObjectRequest { + store_id: self.store_id.clone(), + global_version: None, + transaction_items: vec![KeyValue { + key: store_key, + version: vss_version, + value: storable.encode_to_vec(), + }], + delete_items: delete_items.clone(), + }; + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { self.client.put_object(&request).await.map_err(|e| { // Restore delete items so they'll be retried on next write. if !delete_items.is_empty() { From 86239cf9873ce2ad43b023b688218c9e7a87356e Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 11 Nov 2025 13:35:34 +0100 Subject: [PATCH 155/184] Prefactor: move client construction out to `VssStore` While having it in `VssStoreInner` makes more sense, we now opt to construt the client (soon, clients) in `VssStore` and then hand it down to `VssStoreInner`. That will allow us to use the client once for checking the schema version before actually instantiating `VssStoreInner`. 
--- src/io/vss_store.rs | 50 ++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 6da3a53db..6ccf77080 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -82,7 +82,6 @@ impl VssStore { base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, runtime: Arc, ) -> Self { - let inner = Arc::new(VssStoreInner::new(base_url, store_id, vss_seed, header_provider)); let next_version = AtomicU64::new(1); let internal_runtime = Some( tokio::runtime::Builder::new_multi_thread() @@ -98,6 +97,33 @@ impl VssStore { .unwrap(), ); + let schema_version = VssSchemaVersion::V0; + let (data_encryption_key, obfuscation_master_key) = + derive_data_encryption_and_obfuscation_keys(&vss_seed); + let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); + let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) + .with_max_attempts(10) + .with_max_total_delay(Duration::from_secs(15)) + .with_max_jitter(Duration::from_millis(10)) + .skip_retry_on_error(Box::new(|e: &VssError| { + matches!( + e, + VssError::NoSuchKeyError(..) + | VssError::InvalidRequestError(..) + | VssError::ConflictError(..) 
+ ) + }) as _); + + let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); + + let inner = Arc::new(VssStoreInner::new( + schema_version, + client, + store_id, + data_encryption_key, + key_obfuscator, + )); + Self { inner, next_version, runtime, internal_runtime } } @@ -341,27 +367,9 @@ struct VssStoreInner { impl VssStoreInner { pub(crate) fn new( - base_url: String, store_id: String, vss_seed: [u8; 32], - header_provider: Arc, + schema_version: VssSchemaVersion, client: VssClient, store_id: String, + data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, ) -> Self { - let schema_version = VssSchemaVersion::V0; - let (data_encryption_key, obfuscation_master_key) = - derive_data_encryption_and_obfuscation_keys(&vss_seed); - let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); - let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) - .with_max_attempts(10) - .with_max_total_delay(Duration::from_secs(15)) - .with_max_jitter(Duration::from_millis(10)) - .skip_retry_on_error(Box::new(|e: &VssError| { - matches!( - e, - VssError::NoSuchKeyError(..) - | VssError::InvalidRequestError(..) - | VssError::ConflictError(..) - ) - }) as _); - - let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); let locks = Mutex::new(HashMap::new()); let pending_lazy_deletes = Mutex::new(Vec::new()); Self { From a37517905a923f9b3ed443cb8142ef8a08c6f599 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 3 Nov 2025 13:15:36 +0100 Subject: [PATCH 156/184] Only use internal runtime in `VssStore` We previously attempted to drop the internal runtime from `VssStore`, resulting into blocking behavior. While we recently made changes that improved our situation (having VSS CI pass again pretty reliably), we just ran into yet another case where the VSS CI hung (cf. https://github.com/lightningdevkit/vss-server/actions/runs/19023212819/job/54322173817?pr=59). 
Here we attempt to restore even more of the original pre- ab3d78d1ecd05a755c836915284e5ca60c65692a / #623 behavior to get rid of the reappearing blocking behavior, i.e., only use the internal runtime in `VssStore`. --- src/builder.rs | 3 +- src/io/vss_store.rs | 83 ++++++++++++++++++++------------------------- 2 files changed, 38 insertions(+), 48 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index c0e39af7a..59f5b9b46 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -731,8 +731,7 @@ impl NodeBuilder { let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); - let vss_store = - VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider, Arc::clone(&runtime)); + let vss_store = VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider); build_with_store_internal( config, self.chain_data_source_config.as_ref(), diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 6ccf77080..31b7d71cb 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -36,7 +36,6 @@ use vss_client::util::retry::{ use vss_client::util::storable_builder::{EntropySource, StorableBuilder}; use crate::io::utils::check_namespace_key_validity; -use crate::runtime::Runtime; type CustomRetryPolicy = FilteredRetryPolicy< JitteredRetryPolicy< @@ -67,7 +66,6 @@ pub struct VssStore { // Version counter to ensure that writes are applied in the correct order. It is assumed that read and list // operations aren't sensitive to the order of execution. next_version: AtomicU64, - runtime: Arc, // A VSS-internal runtime we use to avoid any deadlocks we could hit when waiting on a spawned // blocking task to finish while the blocked thread had acquired the reactor. 
In particular, // this works around a previously-hit case where a concurrent call to @@ -80,7 +78,7 @@ pub struct VssStore { impl VssStore { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], - header_provider: Arc, runtime: Arc, + header_provider: Arc, ) -> Self { let next_version = AtomicU64::new(1); let internal_runtime = Some( @@ -124,7 +122,7 @@ impl VssStore { key_obfuscator, )); - Self { inner, next_version, runtime, internal_runtime } + Self { inner, next_version, internal_runtime } } // Same logic as for the obfuscated keys below, but just for locking, using the plaintext keys @@ -171,13 +169,14 @@ impl KVStoreSync for VssStore { async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::read timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? + tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::read timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? + }) } fn write( @@ -209,13 +208,14 @@ impl KVStoreSync for VssStore { }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::write timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? 
+ tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::write timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? + }) } fn remove( @@ -247,13 +247,14 @@ impl KVStoreSync for VssStore { }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::remove timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? + tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::remove timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? + }) } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { @@ -268,13 +269,14 @@ impl KVStoreSync for VssStore { let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await }; // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always // times out. - let spawned_fut = internal_runtime.spawn(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::list timed out"; - Error::new(ErrorKind::Other, msg) - }) - }); - self.runtime.block_on(spawned_fut).expect("We should always finish")? + tokio::task::block_in_place(move || { + internal_runtime.block_on(async move { + tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { + let msg = "VssStore::list timed out"; + Error::new(ErrorKind::Other, msg) + }) + })? 
+ }) } } @@ -694,7 +696,6 @@ mod tests { use super::*; use crate::io::test_utils::do_read_write_remove_list_persist; - use crate::logger::Logger; #[test] fn vss_read_write_remove_list_persist() { @@ -704,11 +705,7 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let logger = Arc::new(Logger::new_log_facade()); - let runtime = Arc::new(Runtime::new(logger).unwrap()); - let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); - + let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); do_read_write_remove_list_persist(&vss_store); } @@ -720,10 +717,7 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let logger = Arc::new(Logger::new_log_facade()); - let runtime = Arc::new(Runtime::new(logger).unwrap()); - let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); + let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); do_read_write_remove_list_persist(&vss_store); drop(vss_store) @@ -737,10 +731,7 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let logger = Arc::new(Logger::new_log_facade()); - let runtime = Arc::new(Runtime::new(logger).unwrap()); - let vss_store = - VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider, runtime); + let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); let primary_namespace = "test_namespace"; let secondary_namespace = ""; From 2af810335c2cbdd60cf6680b81f3bd432f747cbb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 6 Nov 2025 12:39:51 +0100 Subject: [PATCH 157/184] Drop redundant `tokio::timeout`s for VSS IO Now that we rely on `reqwest` v0.12.* retry 
logic as well as client-side timeouts, we can address the remaining TODOs here and simply drop the redundant `tokio::timeout`s we previously added as a safeguard to blocking tasks (even though in the worst cases we saw they never actually fired). --- src/io/vss_store.rs | 45 ++++----------------------------------------- 1 file changed, 4 insertions(+), 41 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 31b7d71cb..0416b0463 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -58,7 +58,6 @@ enum VssSchemaVersion { // We set this to a small number of threads that would still allow to make some progress if one // would hit a blocking case const INTERNAL_RUNTIME_WORKERS: usize = 2; -const VSS_IO_TIMEOUT: Duration = Duration::from_secs(5); /// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend. pub struct VssStore { @@ -167,16 +166,7 @@ impl KVStoreSync for VssStore { let inner = Arc::clone(&self.inner); let fut = async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::read timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? - }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } fn write( @@ -206,16 +196,7 @@ impl KVStoreSync for VssStore { ) .await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::write timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? 
- }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } fn remove( @@ -245,16 +226,7 @@ impl KVStoreSync for VssStore { ) .await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::remove timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? - }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { @@ -267,16 +239,7 @@ impl KVStoreSync for VssStore { let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await }; - // TODO: We could drop the timeout here once we ensured vss-client's Retry logic always - // times out. - tokio::task::block_in_place(move || { - internal_runtime.block_on(async move { - tokio::time::timeout(VSS_IO_TIMEOUT, fut).await.map_err(|_| { - let msg = "VssStore::list timed out"; - Error::new(ErrorKind::Other, msg) - }) - })? 
- }) + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } } From 37ee256162dbd236e31e460a546c6a294b8f15bf Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 10 Nov 2025 16:31:05 +0100 Subject: [PATCH 158/184] Bump retries and timeouts considerably --- src/io/vss_store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 0416b0463..ffff2b833 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -99,9 +99,9 @@ impl VssStore { derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) - .with_max_attempts(10) - .with_max_total_delay(Duration::from_secs(15)) - .with_max_jitter(Duration::from_millis(10)) + .with_max_attempts(100) + .with_max_total_delay(Duration::from_secs(180)) + .with_max_jitter(Duration::from_millis(100)) .skip_retry_on_error(Box::new(|e: &VssError| { matches!( e, From 37ed5c4a7b921d5b4238481847b54e49f1d21946 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 10 Nov 2025 16:44:26 +0100 Subject: [PATCH 159/184] Introduce two separate `VssClient`s for async/blocking contexts To avoid any blocking cross-runtime behavior that could arise from reusing a single client's TCP connections in different runtime contexts, we here split out the `VssStore` behavior to use one dedicated `VssClient` per context. I.e., we're now using two connections/connection pools and make sure only the `blocking_client` is used in `KVStoreSync` contexts, and `async_client` in `KVStore` contexts. 
--- src/io/vss_store.rs | 114 ++++++++++++++++++++++++++++++-------------- 1 file changed, 77 insertions(+), 37 deletions(-) diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index ffff2b833..f9ce602f0 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -98,24 +98,22 @@ impl VssStore { let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); - let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) - .with_max_attempts(100) - .with_max_total_delay(Duration::from_secs(180)) - .with_max_jitter(Duration::from_millis(100)) - .skip_retry_on_error(Box::new(|e: &VssError| { - matches!( - e, - VssError::NoSuchKeyError(..) - | VssError::InvalidRequestError(..) - | VssError::ConflictError(..) - ) - }) as _); - let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); + let sync_retry_policy = retry_policy(); + let blocking_client = VssClient::new_with_headers( + base_url.clone(), + sync_retry_policy, + header_provider.clone(), + ); + + let async_retry_policy = retry_policy(); + let async_client = + VssClient::new_with_headers(base_url, async_retry_policy, header_provider); let inner = Arc::new(VssStoreInner::new( schema_version, - client, + blocking_client, + async_client, store_id, data_encryption_key, key_obfuscator, @@ -164,8 +162,11 @@ impl KVStoreSync for VssStore { let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); - let fut = - async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }; + let fut = async move { + inner + .read_internal(&inner.blocking_client, primary_namespace, secondary_namespace, key) + .await + }; tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } @@ -186,6 +187,7 @@ impl KVStoreSync for VssStore { let fut = async move { inner .write_internal( + 
&inner.blocking_client, inner_lock_ref, locking_key, version, @@ -216,6 +218,7 @@ impl KVStoreSync for VssStore { let fut = async move { inner .remove_internal( + &inner.blocking_client, inner_lock_ref, locking_key, version, @@ -238,7 +241,11 @@ impl KVStoreSync for VssStore { let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); - let fut = async move { inner.list_internal(primary_namespace, secondary_namespace).await }; + let fut = async move { + inner + .list_internal(&inner.blocking_client, primary_namespace, secondary_namespace) + .await + }; tokio::task::block_in_place(move || internal_runtime.block_on(fut)) } } @@ -251,9 +258,11 @@ impl KVStore for VssStore { let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); let inner = Arc::clone(&self.inner); - Box::pin( - async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }, - ) + Box::pin(async move { + inner + .read_internal(&inner.async_client, primary_namespace, secondary_namespace, key) + .await + }) } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, @@ -267,6 +276,7 @@ impl KVStore for VssStore { Box::pin(async move { inner .write_internal( + &inner.async_client, inner_lock_ref, locking_key, version, @@ -290,6 +300,7 @@ impl KVStore for VssStore { Box::pin(async move { inner .remove_internal( + &inner.async_client, inner_lock_ref, locking_key, version, @@ -307,7 +318,9 @@ impl KVStore for VssStore { let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); - Box::pin(async move { inner.list_internal(primary_namespace, secondary_namespace).await }) + Box::pin(async move { + inner.list_internal(&inner.async_client, primary_namespace, secondary_namespace).await + }) } } @@ -320,7 +333,10 @@ impl Drop for VssStore { struct 
VssStoreInner { schema_version: VssSchemaVersion, - client: VssClient, + blocking_client: VssClient, + // A secondary client that will only be used for async persistence via `KVStore`, to ensure TCP + // connections aren't shared between our outer and the internal runtime. + async_client: VssClient, store_id: String, data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, @@ -332,14 +348,16 @@ struct VssStoreInner { impl VssStoreInner { pub(crate) fn new( - schema_version: VssSchemaVersion, client: VssClient, store_id: String, + schema_version: VssSchemaVersion, blocking_client: VssClient, + async_client: VssClient, store_id: String, data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, ) -> Self { let locks = Mutex::new(HashMap::new()); let pending_lazy_deletes = Mutex::new(Vec::new()); Self { schema_version, - client, + blocking_client, + async_client, store_id, data_encryption_key, key_obfuscator, @@ -405,7 +423,8 @@ impl VssStoreInner { } async fn list_all_keys( - &self, primary_namespace: &str, secondary_namespace: &str, + &self, client: &VssClient, primary_namespace: &str, + secondary_namespace: &str, ) -> io::Result> { let mut page_token = None; let mut keys = vec![]; @@ -418,7 +437,7 @@ impl VssStoreInner { page_size: None, }; - let response = self.client.list_key_versions(&request).await.map_err(|e| { + let response = client.list_key_versions(&request).await.map_err(|e| { let msg = format!( "Failed to list keys in {}/{}: {}", primary_namespace, secondary_namespace, e @@ -435,13 +454,14 @@ impl VssStoreInner { } async fn read_internal( - &self, primary_namespace: String, secondary_namespace: String, key: String, + &self, client: &VssClient, primary_namespace: String, + secondary_namespace: String, key: String, ) -> io::Result> { check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); let request = GetObjectRequest { 
store_id: self.store_id.clone(), key: store_key.clone() }; - let resp = self.client.get_object(&request).await.map_err(|e| { + let resp = client.get_object(&request).await.map_err(|e| { let msg = format!( "Failed to read from key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e @@ -470,8 +490,9 @@ impl VssStoreInner { } async fn write_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + &self, client: &VssClient, inner_lock_ref: Arc>, + locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, + key: String, buf: Vec, ) -> io::Result<()> { check_namespace_key_validity( &primary_namespace, @@ -505,7 +526,7 @@ impl VssStoreInner { }; self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { - self.client.put_object(&request).await.map_err(|e| { + client.put_object(&request).await.map_err(|e| { // Restore delete items so they'll be retried on next write. 
if !delete_items.is_empty() { self.pending_lazy_deletes.lock().unwrap().extend(delete_items); @@ -524,8 +545,9 @@ impl VssStoreInner { } async fn remove_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + &self, client: &VssClient, inner_lock_ref: Arc>, + locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, + key: String, lazy: bool, ) -> io::Result<()> { check_namespace_key_validity( &primary_namespace, @@ -548,7 +570,7 @@ impl VssStoreInner { let request = DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(key_value) }; - self.client.delete_object(&request).await.map_err(|e| { + client.delete_object(&request).await.map_err(|e| { let msg = format!( "Failed to delete key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e @@ -562,12 +584,15 @@ impl VssStoreInner { } async fn list_internal( - &self, primary_namespace: String, secondary_namespace: String, + &self, client: &VssClient, primary_namespace: String, + secondary_namespace: String, ) -> io::Result> { check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?; - let keys = - self.list_all_keys(&primary_namespace, &secondary_namespace).await.map_err(|e| { + let keys = self + .list_all_keys(client, &primary_namespace, &secondary_namespace) + .await + .map_err(|e| { let msg = format!( "Failed to retrieve keys in namespace: {}/{} : {}", primary_namespace, secondary_namespace, e @@ -636,6 +661,21 @@ fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32] (k1, k2) } +fn retry_policy() -> CustomRetryPolicy { + ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) + .with_max_attempts(100) + .with_max_total_delay(Duration::from_secs(180)) + .with_max_jitter(Duration::from_millis(100)) + .skip_retry_on_error(Box::new(|e: &VssError| { + matches!( + e, + VssError::NoSuchKeyError(..) 
+ | VssError::InvalidRequestError(..) + | VssError::ConflictError(..) + ) + }) as _) +} + /// A source for generating entropy/randomness using [`rand`]. pub(crate) struct RandEntropySource; From 20a93c5758862307e8a2d7d1feb45238fe6db076 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 12 Nov 2025 11:31:12 +0100 Subject: [PATCH 160/184] Determine VSS schema version at startup Since we just made some breaking changes to how exactly we persist data via VSS (now using an `aad` that commits to the key and also obfuscating namespaces), we have to detect which schema version we're on to ensure backwards compatibility. To this end, we here start reading a persisted `vss_schema_version` key in `VssStore::new`. If it is present, we just return the encoded value (right now that can only be V1). If it is not present, it can either mean we run for the first time *or* we're on V0, which we determine checking if anything related to the `bdk_wallet` descriptors are present in the store. If we're running for the first time, we also persist the schema version to save us these rather inefficient steps on following startups. 
--- src/builder.rs | 7 +- src/io/vss_store.rs | 165 +++++++++++++++++++++++++++++++++++++++----- 2 files changed, 152 insertions(+), 20 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 59f5b9b46..b45f03f6d 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -731,7 +731,12 @@ impl NodeBuilder { let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); - let vss_store = VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider); + let vss_store = + VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| { + log_error!(logger, "Failed to setup VSS store: {}", e); + BuildError::KVStoreSetupFailed + })?; + build_with_store_internal( config, self.chain_data_source_config.as_ref(), diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index f9ce602f0..2906b89ca 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -17,8 +17,10 @@ use std::time::Duration; use bdk_chain::Merge; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; +use lightning::impl_writeable_tlv_based_enum; use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::{KVStore, KVStoreSync}; +use lightning::util::ser::{Readable, Writeable}; use prost::Message; use rand::RngCore; use vss_client::client::VssClient; @@ -55,6 +57,13 @@ enum VssSchemaVersion { V1, } +impl_writeable_tlv_based_enum!(VssSchemaVersion, + (0, V0) => {}, + (1, V1) => {}, +); + +const VSS_SCHEMA_VERSION_KEY: &str = "vss_schema_version"; + // We set this to a small number of threads that would still allow to make some progress if one // would hit a blocking case const INTERNAL_RUNTIME_WORKERS: usize = 2; @@ -78,23 +87,20 @@ impl VssStore { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, - ) -> Self { + ) -> io::Result { let next_version = AtomicU64::new(1); - let internal_runtime = Some( - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name_fn(|| { - static ATOMIC_ID: 
AtomicUsize = AtomicUsize::new(0); - let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); - format!("ldk-node-vss-runtime-{}", id) - }) - .worker_threads(INTERNAL_RUNTIME_WORKERS) - .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) - .build() - .unwrap(), - ); + let internal_runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name_fn(|| { + static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); + format!("ldk-node-vss-runtime-{}", id) + }) + .worker_threads(INTERNAL_RUNTIME_WORKERS) + .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) + .build() + .unwrap(); - let schema_version = VssSchemaVersion::V0; let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); @@ -106,6 +112,19 @@ impl VssStore { header_provider.clone(), ); + let runtime_handle = internal_runtime.handle(); + let schema_version = tokio::task::block_in_place(|| { + runtime_handle.block_on(async { + determine_and_write_schema_version( + &blocking_client, + &store_id, + data_encryption_key, + &key_obfuscator, + ) + .await + }) + })?; + let async_retry_policy = retry_policy(); let async_client = VssClient::new_with_headers(base_url, async_retry_policy, header_provider); @@ -119,7 +138,7 @@ impl VssStore { key_obfuscator, )); - Self { inner, next_version, internal_runtime } + Ok(Self { inner, next_version, internal_runtime: Some(internal_runtime) }) } // Same logic as for the obfuscated keys below, but just for locking, using the plaintext keys @@ -676,6 +695,111 @@ fn retry_policy() -> CustomRetryPolicy { }) as _) } +async fn determine_and_write_schema_version( + client: &VssClient, store_id: &String, data_encryption_key: [u8; 32], + key_obfuscator: &KeyObfuscator, +) -> io::Result { + // Build the obfuscated `vss_schema_version` key. + let obfuscated_prefix = key_obfuscator.obfuscate(&format! 
{"{}#{}", "", ""}); + let obfuscated_key = key_obfuscator.obfuscate(VSS_SCHEMA_VERSION_KEY); + let store_key = format!("{}#{}", obfuscated_prefix, obfuscated_key); + + // Try to read the stored schema version. + let request = GetObjectRequest { store_id: store_id.clone(), key: store_key.clone() }; + let resp = match client.get_object(&request).await { + Ok(resp) => Some(resp), + Err(VssError::NoSuchKeyError(..)) => { + // The value is not set. + None + }, + Err(e) => { + let msg = format!("Failed to read schema version: {}", e); + return Err(Error::new(ErrorKind::Other, msg)); + }, + }; + + if let Some(resp) = resp { + // The schema version was present, so just decrypt the stored data. + + // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise + // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] + let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + let storable_builder = StorableBuilder::new(RandEntropySource); + // Schema version was added starting with V1, so if set at all, we use the key as `aad` + let aad = store_key.as_bytes(); + let decrypted = storable_builder + .deconstruct(storable, &data_encryption_key, aad) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })? + .0; + + let schema_version: VssSchemaVersion = Readable::read(&mut io::Cursor::new(decrypted)) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + Ok(schema_version) + } else { + // The schema version wasn't present, this either means we're running for the first time *or* it's V0 pre-migration (predating writing of the schema version). 
+ + // Check if any `bdk_wallet` data was written by listing keys under the respective + // (unobfuscated) prefix. + const V0_BDK_WALLET_PREFIX: &str = "bdk_wallet#"; + let request = ListKeyVersionsRequest { + store_id: store_id.clone(), + key_prefix: Some(V0_BDK_WALLET_PREFIX.to_string()), + page_token: None, + page_size: None, + }; + + let response = client.list_key_versions(&request).await.map_err(|e| { + let msg = format!("Failed to determine schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + let wallet_data_present = !response.key_versions.is_empty(); + if wallet_data_present { + // If the wallet data is present, it means we're not running for the first time. + Ok(VssSchemaVersion::V0) + } else { + // We're running for the first time, write the schema version to save unnecessary IOps + // on future startup. + let schema_version = VssSchemaVersion::V1; + let encoded_version = schema_version.encode(); + + let storable_builder = StorableBuilder::new(RandEntropySource); + let vss_version = -1; + let aad = store_key.as_bytes(); + let storable = + storable_builder.build(encoded_version, vss_version, &data_encryption_key, aad); + + let request = PutObjectRequest { + store_id: store_id.clone(), + global_version: None, + transaction_items: vec![KeyValue { + key: store_key, + version: vss_version, + value: storable.encode_to_vec(), + }], + delete_items: vec![], + }; + + client.put_object(&request).await.map_err(|e| { + let msg = format!("Failed to write schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + Ok(schema_version) + } + } +} + /// A source for generating entropy/randomness using [`rand`]. 
pub(crate) struct RandEntropySource; @@ -708,7 +832,8 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); do_read_write_remove_list_persist(&vss_store); } @@ -720,7 +845,8 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); do_read_write_remove_list_persist(&vss_store); drop(vss_store) @@ -734,7 +860,8 @@ mod tests { let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); - let vss_store = VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); let primary_namespace = "test_namespace"; let secondary_namespace = ""; From 8e5503c89707d0c48ef4d5b820b5bcaedfb8b4a9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 13 Nov 2025 11:07:44 +0100 Subject: [PATCH 161/184] Add test ensuring backwards compatibility with VSS schema `v0` We add a test case that ensures that a node started and persisted on LDK Node v0.6.2 can still be successfully started with the new schema changes. 
Co-authored by Claude AI --- Cargo.toml | 3 ++ tests/integration_tests_vss.rs | 75 ++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 8e2f04447..34b16994f 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -91,6 +91,9 @@ clightningrpc = { version = "0.3.0-beta.8", default-features = false } lnd_grpc_rust = { version = "2.10.0", default-features = false } tokio = { version = "1.37", features = ["fs"] } +[target.'cfg(vss_test)'.dev-dependencies] +ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } + [build-dependencies] uniffi = { version = "0.28.3", features = ["build"], optional = true } diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 93f167dae..03b3c8c06 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -12,6 +12,7 @@ mod common; use std::collections::HashMap; use ldk_node::Builder; +use rand::{rng, Rng}; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_with_vss_store() { @@ -55,3 +56,77 @@ async fn channel_full_cycle_with_vss_store() { ) .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn vss_v0_schema_backwards_compatibility() { + let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + + let rand_suffix: String = + (0..7).map(|_| rng().sample(rand::distr::Alphanumeric) as char).collect(); + let store_id = format!("v0_compat_test_{}", rand_suffix); + let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); + let seed_bytes = [42u8; 64]; + + // Setup a v0.6.2 `Node` persisted with the v0 scheme. 
+ let (old_balance, old_node_id) = { + let mut builder_old = ldk_node_062::Builder::new(); + builder_old.set_network(bitcoin::Network::Regtest); + builder_old.set_storage_dir_path(storage_path.clone()); + builder_old.set_entropy_seed_bytes(seed_bytes); + builder_old.set_chain_source_esplora(esplora_url.clone(), None); + let node_old = builder_old + .build_with_vss_store_and_fixed_headers( + vss_base_url.clone(), + store_id.clone(), + HashMap::new(), + ) + .unwrap(); + + node_old.start().unwrap(); + let addr_old = node_old.onchain_payment().new_address().unwrap(); + common::premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_old], + bitcoin::Amount::from_sat(100_000), + ) + .await; + node_old.sync_wallets().unwrap(); + + let balance = node_old.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node_old.node_id(); + + // Workaround necessary as v0.6.2's VSS runtime wasn't dropsafe in a tokio context. + tokio::task::block_in_place(move || { + node_old.stop().unwrap(); + drop(node_old); + }); + + (balance, node_id) + }; + + // Now ensure we can still reinit from the same backend. 
+ let mut builder_new = Builder::new(); + builder_new.set_network(bitcoin::Network::Regtest); + builder_new.set_storage_dir_path(storage_path); + builder_new.set_entropy_seed_bytes(seed_bytes); + builder_new.set_chain_source_esplora(esplora_url, None); + + let node_new = builder_new + .build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new()) + .unwrap(); + + node_new.start().unwrap(); + node_new.sync_wallets().unwrap(); + + let new_balance = node_new.list_balances().spendable_onchain_balance_sats; + let new_node_id = node_new.node_id(); + + assert_eq!(old_node_id, new_node_id); + assert_eq!(old_balance, new_balance); + + node_new.stop().unwrap(); +} From d2153f2c50b3294978df4af87c461c4b6f92cf42 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 13 Nov 2025 11:50:30 +0100 Subject: [PATCH 162/184] Add simple test ensuring we can restart from a VSS backend This is close to the backwards compatibility test we just added for v0, now just making sure we can actually read the data we persisted with our current (V1+) code. 
Co-authored by Claude AI --- tests/integration_tests_vss.rs | 66 ++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 03b3c8c06..3b384ec45 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -130,3 +130,69 @@ async fn vss_v0_schema_backwards_compatibility() { node_new.stop().unwrap(); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn vss_node_restart() { + let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + + let rand_suffix: String = + (0..7).map(|_| rng().sample(rand::distr::Alphanumeric) as char).collect(); + let store_id = format!("restart_test_{}", rand_suffix); + let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); + let seed_bytes = [42u8; 64]; + + // Setup initial node and fund it. + let (expected_balance_sats, expected_node_id) = { + let mut builder = Builder::new(); + builder.set_network(bitcoin::Network::Regtest); + builder.set_storage_dir_path(storage_path.clone()); + builder.set_entropy_seed_bytes(seed_bytes); + builder.set_chain_source_esplora(esplora_url.clone(), None); + let node = builder + .build_with_vss_store_and_fixed_headers( + vss_base_url.clone(), + store_id.clone(), + HashMap::new(), + ) + .unwrap(); + + node.start().unwrap(); + let addr = node.onchain_payment().new_address().unwrap(); + common::premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr], + bitcoin::Amount::from_sat(100_000), + ) + .await; + node.sync_wallets().unwrap(); + + let balance = node.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node.node_id(); + + node.stop().unwrap(); + (balance, node_id) + }; + + // Verify node can be restarted from VSS backend. 
+	let mut builder = Builder::new();
+	builder.set_network(bitcoin::Network::Regtest);
+	builder.set_storage_dir_path(storage_path);
+	builder.set_entropy_seed_bytes(seed_bytes);
+	builder.set_chain_source_esplora(esplora_url, None);
+
+	let node = builder
+		.build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new())
+		.unwrap();
+
+	node.start().unwrap();
+	node.sync_wallets().unwrap();
+
+	assert_eq!(expected_node_id, node.node_id());
+	assert_eq!(expected_balance_sats, node.list_balances().spendable_onchain_balance_sats);
+
+	node.stop().unwrap();
+}

From 5f1a872efa8b831a3b35bfdd2b6bd749fc7113a7 Mon Sep 17 00:00:00 2001
From: Elias Rohrer 
Date: Tue, 18 Nov 2025 11:13:19 +0100
Subject: [PATCH 163/184] Prefactor: Move `ChainSource` creation before `Wallet` creation

In the following commits we will use the chain source to poll a best tip
before initializing the listener objects.

As a prefactor, we here move the creation of our onchain wallet after
creation of the chain source, which in turn means we'll need to use the
same pattern as for the other listeners, i.e., not giving the wallet
reference to `ChainSource` on creation but rather handing it in when
it's being used at runtime.
--- src/builder.rs | 120 ++++++++++++++++++++---------------------- src/chain/bitcoind.rs | 54 +++++++++---------- src/chain/electrum.rs | 20 +++---- src/chain/esplora.rs | 21 ++++---- src/chain/mod.rs | 57 +++++++++++--------- src/lib.rs | 19 +++++-- 6 files changed, 150 insertions(+), 141 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index b45f03f6d..98650aa1a 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1178,54 +1178,6 @@ fn build_with_store_internal( } }, }; - - // Initialize the on-chain wallet and chain access - let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { - log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes - })?; - - let descriptor = Bip84(xprv, KeychainKind::External); - let change_descriptor = Bip84(xprv, KeychainKind::Internal); - let mut wallet_persister = - KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger)); - let wallet_opt = BdkWallet::load() - .descriptor(KeychainKind::External, Some(descriptor.clone())) - .descriptor(KeychainKind::Internal, Some(change_descriptor.clone())) - .extract_keys() - .check_network(config.network) - .load_wallet(&mut wallet_persister) - .map_err(|e| match e { - bdk_wallet::LoadWithPersistError::InvalidChangeSet( - bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network { - loaded, - expected, - }), - ) => { - log_error!( - logger, - "Failed to setup wallet: Networks do not match. 
Expected {} but got {}", - expected, - loaded - ); - BuildError::NetworkMismatch - }, - _ => { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - }, - })?; - let bdk_wallet = match wallet_opt { - Some(wallet) => wallet, - None => BdkWallet::create(descriptor, change_descriptor) - .network(config.network) - .create_wallet(&mut wallet_persister) - .map_err(|e| { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - })?, - }; - let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); let fee_estimator = Arc::new(OnchainFeeEstimator::new()); @@ -1243,16 +1195,6 @@ fn build_with_store_internal( }, }; - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&payment_store), - Arc::clone(&config), - Arc::clone(&logger), - )); - let chain_source = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); @@ -1260,7 +1202,6 @@ fn build_with_store_internal( server_url.clone(), headers.clone(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1274,7 +1215,6 @@ fn build_with_store_internal( Arc::new(ChainSource::new_electrum( server_url.clone(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1295,7 +1235,6 @@ fn build_with_store_internal( *rpc_port, rpc_user.clone(), rpc_password.clone(), - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1309,7 +1248,6 @@ fn build_with_store_internal( *rpc_port, rpc_user.clone(), rpc_password.clone(), - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1327,7 +1265,6 @@ fn 
build_with_store_internal( server_url.clone(), HashMap::new(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), @@ -1338,6 +1275,63 @@ fn build_with_store_internal( }, }; + // Initialize the on-chain wallet and chain access + let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes + })?; + + let descriptor = Bip84(xprv, KeychainKind::External); + let change_descriptor = Bip84(xprv, KeychainKind::Internal); + let mut wallet_persister = + KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger)); + let wallet_opt = BdkWallet::load() + .descriptor(KeychainKind::External, Some(descriptor.clone())) + .descriptor(KeychainKind::Internal, Some(change_descriptor.clone())) + .extract_keys() + .check_network(config.network) + .load_wallet(&mut wallet_persister) + .map_err(|e| match e { + bdk_wallet::LoadWithPersistError::InvalidChangeSet( + bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network { + loaded, + expected, + }), + ) => { + log_error!( + logger, + "Failed to setup wallet: Networks do not match. 
Expected {} but got {}", + expected, + loaded + ); + BuildError::NetworkMismatch + }, + _ => { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + }, + })?; + let bdk_wallet = match wallet_opt { + Some(wallet) => wallet, + None => BdkWallet::create(descriptor, change_descriptor) + .network(config.network) + .create_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + })?, + }; + + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&payment_store), + Arc::clone(&config), + Arc::clone(&logger), + )); + // Initialize the KeysManager let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| { log_error!(logger, "Failed to get current time: {}", e); diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 4b7cd588f..4d7a4a0fe 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -47,7 +47,6 @@ pub(super) struct BitcoindChainSource { api_client: Arc, header_cache: tokio::sync::Mutex, latest_chain_tip: RwLock>, - onchain_wallet: Arc, wallet_polling_status: Mutex, fee_estimator: Arc, kv_store: Arc, @@ -59,9 +58,8 @@ pub(super) struct BitcoindChainSource { impl BitcoindChainSource { pub(crate) fn new_rpc( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - kv_store: Arc, config: Arc, logger: Arc, - node_metrics: Arc>, + fee_estimator: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, ) -> Self { let api_client = Arc::new(BitcoindClient::new_rpc( rpc_host.clone(), @@ -77,7 +75,6 @@ impl BitcoindChainSource { api_client, header_cache, latest_chain_tip, - onchain_wallet, wallet_polling_status, fee_estimator, kv_store, @@ -89,9 +86,9 @@ impl BitcoindChainSource { pub(crate) fn new_rest( rpc_host: String, rpc_port: u16, rpc_user: String, 
rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, - logger: Arc, node_metrics: Arc>, + fee_estimator: Arc, kv_store: Arc, config: Arc, + rest_client_config: BitcoindRestClientConfig, logger: Arc, + node_metrics: Arc>, ) -> Self { let api_client = Arc::new(BitcoindClient::new_rest( rest_client_config.rest_host, @@ -111,7 +108,6 @@ impl BitcoindChainSource { header_cache, latest_chain_tip, wallet_polling_status, - onchain_wallet, fee_estimator, kv_store, config, @@ -126,8 +122,8 @@ impl BitcoindChainSource { pub(super) async fn continuously_sync_wallets( &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, - channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, + onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, ) { // First register for the wallet polling status to make sure `Node::sync_wallets` calls // wait on the result before proceeding. 
@@ -155,14 +151,10 @@ impl BitcoindChainSource { let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; - let onchain_wallet_best_block_hash = - self.onchain_wallet.current_best_block().block_hash; + let onchain_wallet_best_block_hash = onchain_wallet.current_best_block().block_hash; let mut chain_listeners = vec![ - ( - onchain_wallet_best_block_hash, - &*self.onchain_wallet as &(dyn Listen + Send + Sync), - ), + (onchain_wallet_best_block_hash, &*onchain_wallet as &(dyn Listen + Send + Sync)), (channel_manager_best_block_hash, &*channel_manager as &(dyn Listen + Send + Sync)), (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), ]; @@ -307,6 +299,7 @@ impl BitcoindChainSource { return; } _ = self.poll_and_update_listeners( + Arc::clone(&onchain_wallet), Arc::clone(&channel_manager), Arc::clone(&chain_monitor), Arc::clone(&output_sweeper) @@ -337,8 +330,8 @@ impl BitcoindChainSource { } pub(super) async fn poll_and_update_listeners( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, + &self, onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { let mut status_lock = self.wallet_polling_status.lock().unwrap(); @@ -355,7 +348,12 @@ impl BitcoindChainSource { } let res = self - .poll_and_update_listeners_inner(channel_manager, chain_monitor, output_sweeper) + .poll_and_update_listeners_inner( + onchain_wallet, + channel_manager, + chain_monitor, + output_sweeper, + ) .await; self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); @@ -364,8 +362,8 @@ impl BitcoindChainSource { } async fn poll_and_update_listeners_inner( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, + &self, onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let 
latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); let chain_tip = if let Some(tip) = latest_chain_tip_opt { @@ -386,7 +384,7 @@ impl BitcoindChainSource { let mut locked_header_cache = self.header_cache.lock().await; let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); let chain_listener = ChainListener { - onchain_wallet: Arc::clone(&self.onchain_wallet), + onchain_wallet: Arc::clone(&onchain_wallet), channel_manager: Arc::clone(&channel_manager), chain_monitor: Arc::clone(&chain_monitor), output_sweeper, @@ -422,7 +420,7 @@ impl BitcoindChainSource { let cur_height = channel_manager.current_best_block().height; let now = SystemTime::now(); - let bdk_unconfirmed_txids = self.onchain_wallet.get_unconfirmed_txids(); + let bdk_unconfirmed_txids = onchain_wallet.get_unconfirmed_txids(); match self .api_client .get_updated_mempool_transactions(cur_height, bdk_unconfirmed_txids) @@ -436,11 +434,11 @@ impl BitcoindChainSource { evicted_txids.len(), now.elapsed().unwrap().as_millis() ); - self.onchain_wallet - .apply_mempool_txs(unconfirmed_txs, evicted_txids) - .unwrap_or_else(|e| { + onchain_wallet.apply_mempool_txs(unconfirmed_txs, evicted_txids).unwrap_or_else( + |e| { log_error!(self.logger, "Failed to apply mempool transactions: {:?}", e); - }); + }, + ); }, Err(e) => { log_error!(self.logger, "Failed to poll for mempool transactions: {:?}", e); diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index dbd0d9f7f..9e05dfaee 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -47,7 +47,6 @@ pub(super) struct ElectrumChainSource { server_url: String, pub(super) sync_config: ElectrumSyncConfig, electrum_runtime_status: RwLock, - onchain_wallet: Arc, onchain_wallet_sync_status: Mutex, lightning_wallet_sync_status: Mutex, fee_estimator: Arc, @@ -59,7 +58,7 @@ pub(super) struct ElectrumChainSource { impl ElectrumChainSource { pub(super) fn new( - server_url: String, sync_config: ElectrumSyncConfig, 
onchain_wallet: Arc, + server_url: String, sync_config: ElectrumSyncConfig, fee_estimator: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, ) -> Self { @@ -70,7 +69,6 @@ impl ElectrumChainSource { server_url, sync_config, electrum_runtime_status, - onchain_wallet, onchain_wallet_sync_status, lightning_wallet_sync_status, fee_estimator, @@ -94,7 +92,9 @@ impl ElectrumChainSource { self.electrum_runtime_status.write().unwrap().stop(); } - pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + pub(crate) async fn sync_onchain_wallet( + &self, onchain_wallet: Arc, + ) -> Result<(), Error> { let receiver_res = { let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); status_lock.register_or_subscribe_pending_sync() @@ -108,14 +108,14 @@ impl ElectrumChainSource { })?; } - let res = self.sync_onchain_wallet_inner().await; + let res = self.sync_onchain_wallet_inner(onchain_wallet).await; self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); res } - async fn sync_onchain_wallet_inner(&self) -> Result<(), Error> { + async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc) -> Result<(), Error> { let electrum_client: Arc = if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { Arc::clone(client) @@ -133,7 +133,7 @@ impl ElectrumChainSource { let apply_wallet_update = |update_res: Result, now: Instant| match update_res { - Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { log_info!( self.logger, @@ -160,10 +160,10 @@ impl ElectrumChainSource { Err(e) => Err(e), }; - let cached_txs = self.onchain_wallet.get_cached_txs(); + let cached_txs = onchain_wallet.get_cached_txs(); let res = if incremental_sync { - let incremental_sync_request = self.onchain_wallet.get_incremental_sync_request(); + let incremental_sync_request = onchain_wallet.get_incremental_sync_request(); let 
incremental_sync_fut = electrum_client .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); @@ -171,7 +171,7 @@ impl ElectrumChainSource { let update_res = incremental_sync_fut.await.map(|u| u.into()); apply_wallet_update(update_res, now) } else { - let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let full_scan_request = onchain_wallet.get_full_scan_request(); let full_scan_fut = electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); let now = Instant::now(); diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index be6f2fb86..f6f313955 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -34,7 +34,6 @@ use crate::{Error, NodeMetrics}; pub(super) struct EsploraChainSource { pub(super) sync_config: EsploraSyncConfig, esplora_client: EsploraAsyncClient, - onchain_wallet: Arc, onchain_wallet_sync_status: Mutex, tx_sync: Arc>>, lightning_wallet_sync_status: Mutex, @@ -48,9 +47,8 @@ pub(super) struct EsploraChainSource { impl EsploraChainSource { pub(crate) fn new( server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, - onchain_wallet: Arc, fee_estimator: Arc, - kv_store: Arc, config: Arc, logger: Arc, - node_metrics: Arc>, + fee_estimator: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url); client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); @@ -68,7 +66,6 @@ impl EsploraChainSource { Self { sync_config, esplora_client, - onchain_wallet, onchain_wallet_sync_status, tx_sync, lightning_wallet_sync_status, @@ -80,7 +77,9 @@ impl EsploraChainSource { } } - pub(super) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + pub(super) async fn sync_onchain_wallet( + &self, onchain_wallet: Arc, + ) -> Result<(), Error> { let receiver_res = { let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); 
status_lock.register_or_subscribe_pending_sync() @@ -94,14 +93,14 @@ impl EsploraChainSource { })?; } - let res = self.sync_onchain_wallet_inner().await; + let res = self.sync_onchain_wallet_inner(onchain_wallet).await; self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); res } - async fn sync_onchain_wallet_inner(&self) -> Result<(), Error> { + async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc) -> Result<(), Error> { // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = @@ -112,7 +111,7 @@ impl EsploraChainSource { let now = Instant::now(); match $sync_future.await { Ok(res) => match res { - Ok(update) => match self.onchain_wallet.apply_update(update) { + Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { log_info!( self.logger, @@ -182,14 +181,14 @@ impl EsploraChainSource { } if incremental_sync { - let sync_request = self.onchain_wallet.get_incremental_sync_request(); + let sync_request = onchain_wallet.get_incremental_sync_request(); let wallet_sync_timeout_fut = tokio::time::timeout( Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), ); get_and_apply_wallet_update!(wallet_sync_timeout_fut) } else { - let full_scan_request = self.onchain_wallet.get_full_scan_request(); + let full_scan_request = onchain_wallet.get_full_scan_request(); let wallet_sync_timeout_fut = tokio::time::timeout( Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), self.esplora_client.full_scan( diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 309d60eab..9c7ddd817 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -99,15 +99,14 @@ enum ChainSourceKind { impl ChainSource { pub(crate) fn new_esplora( server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: 
Arc, node_metrics: Arc>, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, ) -> Self { let esplora_chain_source = EsploraChainSource::new( server_url, headers, sync_config, - onchain_wallet, fee_estimator, kv_store, config, @@ -119,7 +118,7 @@ impl ChainSource { } pub(crate) fn new_electrum( - server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, + server_url: String, sync_config: ElectrumSyncConfig, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, @@ -127,7 +126,6 @@ impl ChainSource { let electrum_chain_source = ElectrumChainSource::new( server_url, sync_config, - onchain_wallet, fee_estimator, kv_store, config, @@ -140,16 +138,15 @@ impl ChainSource { pub(crate) fn new_bitcoind_rpc( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - logger: Arc, node_metrics: Arc>, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, ) -> Self { let bitcoind_chain_source = BitcoindChainSource::new_rpc( rpc_host, rpc_port, rpc_user, rpc_password, - onchain_wallet, fee_estimator, kv_store, config, @@ -162,17 +159,15 @@ impl ChainSource { pub(crate) fn new_bitcoind_rest( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, - rest_client_config: BitcoindRestClientConfig, logger: Arc, - node_metrics: Arc>, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, + logger: Arc, node_metrics: Arc>, ) -> Self { let bitcoind_chain_source = BitcoindChainSource::new_rest( rpc_host, rpc_port, rpc_user, rpc_password, - onchain_wallet, fee_estimator, kv_store, config, @@ -223,7 +218,7 @@ impl ChainSource { } pub(crate) 
async fn continuously_sync_wallets( - &self, stop_sync_receiver: tokio::sync::watch::Receiver<()>, + &self, stop_sync_receiver: tokio::sync::watch::Receiver<()>, onchain_wallet: Arc, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) { @@ -234,6 +229,7 @@ impl ChainSource { { self.start_tx_based_sync_loop( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, @@ -256,6 +252,7 @@ impl ChainSource { { self.start_tx_based_sync_loop( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, @@ -276,6 +273,7 @@ impl ChainSource { bitcoind_chain_source .continuously_sync_wallets( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, @@ -287,9 +285,9 @@ impl ChainSource { async fn start_tx_based_sync_loop( &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, - channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, background_sync_config: &BackgroundSyncConfig, - logger: Arc, + onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, + background_sync_config: &BackgroundSyncConfig, logger: Arc, ) { // Setup syncing intervals let onchain_wallet_sync_interval_secs = background_sync_config @@ -328,7 +326,7 @@ impl ChainSource { return; } _ = onchain_wallet_sync_interval.tick() => { - let _ = self.sync_onchain_wallet().await; + let _ = self.sync_onchain_wallet(Arc::clone(&onchain_wallet)).await; } _ = fee_rate_update_interval.tick() => { let _ = self.update_fee_rate_estimates().await; @@ -346,13 +344,15 @@ impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) 
- pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + pub(crate) async fn sync_onchain_wallet( + &self, onchain_wallet: Arc, + ) -> Result<(), Error> { match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { - esplora_chain_source.sync_onchain_wallet().await + esplora_chain_source.sync_onchain_wallet(onchain_wallet).await }, ChainSourceKind::Electrum(electrum_chain_source) => { - electrum_chain_source.sync_onchain_wallet().await + electrum_chain_source.sync_onchain_wallet(onchain_wallet).await }, ChainSourceKind::Bitcoind { .. } => { // In BitcoindRpc mode we sync lightning and onchain wallet in one go via @@ -388,8 +388,8 @@ impl ChainSource { } pub(crate) async fn poll_and_update_listeners( - &self, channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, + &self, onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { match &self.kind { ChainSourceKind::Esplora { .. } => { @@ -404,7 +404,12 @@ impl ChainSource { }, ChainSourceKind::Bitcoind(bitcoind_chain_source) => { bitcoind_chain_source - .poll_and_update_listeners(channel_manager, chain_monitor, output_sweeper) + .poll_and_update_listeners( + onchain_wallet, + channel_manager, + chain_monitor, + output_sweeper, + ) .await }, } diff --git a/src/lib.rs b/src/lib.rs index 9c2a733b0..4d84c3c99 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -238,12 +238,19 @@ impl Node { // Spawn background task continuously syncing onchain, lightning, and fee rate cache. 
let stop_sync_receiver = self.stop_sender.subscribe(); let chain_source = Arc::clone(&self.chain_source); + let sync_wallet = Arc::clone(&self.wallet); let sync_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); self.runtime.spawn_background_task(async move { chain_source - .continuously_sync_wallets(stop_sync_receiver, sync_cman, sync_cmon, sync_sweeper) + .continuously_sync_wallets( + stop_sync_receiver, + sync_wallet, + sync_cman, + sync_cmon, + sync_sweeper, + ) .await; }); @@ -1235,6 +1242,7 @@ impl Node { } let chain_source = Arc::clone(&self.chain_source); + let sync_wallet = Arc::clone(&self.wallet); let sync_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); @@ -1244,11 +1252,16 @@ impl Node { chain_source .sync_lightning_wallet(sync_cman, sync_cmon, Arc::clone(&sync_sweeper)) .await?; - chain_source.sync_onchain_wallet().await?; + chain_source.sync_onchain_wallet(sync_wallet).await?; } else { chain_source.update_fee_rate_estimates().await?; chain_source - .poll_and_update_listeners(sync_cman, sync_cmon, Arc::clone(&sync_sweeper)) + .poll_and_update_listeners( + sync_wallet, + sync_cman, + sync_cmon, + Arc::clone(&sync_sweeper), + ) .await?; } let _ = sync_sweeper.regenerate_and_broadcast_spend_if_necessary().await; From c0880d9d974f5cd6740151401bd6bfa4193172dd Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 18 Nov 2025 09:58:15 +0100 Subject: [PATCH 164/184] Try to poll chain tip on initialization Previously, we couldn't poll the chain tip in `Builder::build` as we wouldn't have a runtime available. Since we now do, we can at least attempt to poll for the chain tip before initializing objects, avoiding that fresh nodes need to re-validate everything from genesis. 
--- src/builder.rs | 112 +++++++++++++++++++++++++----------------- src/chain/bitcoind.rs | 46 +++++++++++------ src/chain/mod.rs | 24 ++++----- 3 files changed, 111 insertions(+), 71 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 98650aa1a..183c7513b 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1195,10 +1195,10 @@ fn build_with_store_internal( }, }; - let chain_source = match chain_data_source_config { + let (chain_source, chain_tip_opt) = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); - Arc::new(ChainSource::new_esplora( + ChainSource::new_esplora( server_url.clone(), headers.clone(), sync_config, @@ -1208,11 +1208,11 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => { let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default()); - Arc::new(ChainSource::new_electrum( + ChainSource::new_electrum( server_url.clone(), sync_config, Arc::clone(&fee_estimator), @@ -1221,7 +1221,7 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, Some(ChainDataSourceConfig::Bitcoind { rpc_host, @@ -1230,38 +1230,44 @@ fn build_with_store_internal( rpc_password, rest_client_config, }) => match rest_client_config { - Some(rest_client_config) => Arc::new(ChainSource::new_bitcoind_rest( - rpc_host.clone(), - *rpc_port, - rpc_user.clone(), - rpc_password.clone(), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - rest_client_config.clone(), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )), - None => Arc::new(ChainSource::new_bitcoind_rpc( - rpc_host.clone(), - *rpc_port, - rpc_user.clone(), - rpc_password.clone(), - Arc::clone(&fee_estimator), - 
Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )), + Some(rest_client_config) => runtime.block_on(async { + ChainSource::new_bitcoind_rest( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + rest_client_config.clone(), + Arc::clone(&logger), + Arc::clone(&node_metrics), + ) + .await + }), + None => runtime.block_on(async { + ChainSource::new_bitcoind_rpc( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + Arc::clone(&logger), + Arc::clone(&node_metrics), + ) + .await + }), }, None => { // Default to Esplora client. let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); let sync_config = EsploraSyncConfig::default(); - Arc::new(ChainSource::new_esplora( + ChainSource::new_esplora( server_url.clone(), HashMap::new(), sync_config, @@ -1271,9 +1277,10 @@ fn build_with_store_internal( Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, }; + let chain_source = Arc::new(chain_source); // Initialize the on-chain wallet and chain access let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { @@ -1313,13 +1320,31 @@ fn build_with_store_internal( })?; let bdk_wallet = match wallet_opt { Some(wallet) => wallet, - None => BdkWallet::create(descriptor, change_descriptor) - .network(config.network) - .create_wallet(&mut wallet_persister) - .map_err(|e| { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - })?, + None => { + let mut wallet = BdkWallet::create(descriptor, change_descriptor) + .network(config.network) + .create_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + 
BuildError::WalletSetupFailed + })?; + + if let Some(best_block) = chain_tip_opt { + // Insert the first checkpoint if we have it, to avoid resyncing from genesis. + // TODO: Use a proper wallet birthday once BDK supports it. + let mut latest_checkpoint = wallet.latest_checkpoint(); + let block_id = + bdk_chain::BlockId { height: best_block.height, hash: best_block.block_hash }; + latest_checkpoint = latest_checkpoint.insert(block_id); + let update = + bdk_wallet::Update { chain: Some(latest_checkpoint), ..Default::default() }; + wallet.apply_update(update).map_err(|e| { + log_error!(logger, "Failed to apply checkpoint during wallet setup: {}", e); + BuildError::WalletSetupFailed + })?; + } + wallet + }, }; let wallet = Arc::new(Wallet::new( @@ -1499,13 +1524,10 @@ fn build_with_store_internal( channel_manager } else { // We're starting a fresh node. - let genesis_block_hash = - bitcoin::blockdata::constants::genesis_block(config.network).block_hash(); + let best_block = + chain_tip_opt.unwrap_or_else(|| BestBlock::from_network(config.network)); - let chain_params = ChainParameters { - network: config.network.into(), - best_block: BestBlock::new(genesis_block_hash, 0), - }; + let chain_params = ChainParameters { network: config.network.into(), best_block }; channelmanager::ChannelManager::new( Arc::clone(&fee_estimator), Arc::clone(&chain_monitor), diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 4d7a4a0fe..b3d7880d6 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -14,7 +14,7 @@ use base64::prelude::BASE64_STANDARD; use base64::Engine; use bitcoin::{BlockHash, FeeRate, Network, Transaction, Txid}; use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::Listen; +use lightning::chain::{BestBlock, Listen}; use lightning::util::ser::Writeable; use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::http::{HttpEndpoint, JsonResponse}; @@ -42,6 +42,7 @@ use 
crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; +const CHAIN_POLLING_TIMEOUT_SECS: u64 = 10; pub(super) struct BitcoindChainSource { api_client: Arc, @@ -329,6 +330,33 @@ impl BitcoindChainSource { } } + pub(super) async fn poll_best_block(&self) -> Result { + self.poll_chain_tip().await.map(|tip| tip.to_best_block()) + } + + async fn poll_chain_tip(&self) -> Result { + let validate_res = tokio::time::timeout( + Duration::from_secs(CHAIN_POLLING_TIMEOUT_SECS), + validate_best_block_header(self.api_client.as_ref()), + ) + .await + .map_err(|e| { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + Error::TxSyncTimeout + })?; + + match validate_res { + Ok(tip) => { + *self.latest_chain_tip.write().unwrap() = Some(tip); + Ok(tip) + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + return Err(Error::TxSyncFailed); + }, + } + } + pub(super) async fn poll_and_update_listeners( &self, onchain_wallet: Arc, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, @@ -366,20 +394,8 @@ impl BitcoindChainSource { chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); - let chain_tip = if let Some(tip) = latest_chain_tip_opt { - tip - } else { - match validate_best_block_header(self.api_client.as_ref()).await { - Ok(tip) => { - *self.latest_chain_tip.write().unwrap() = Some(tip); - tip - }, - Err(e) => { - log_error!(self.logger, "Failed to poll for chain data: {:?}", e); - return Err(Error::TxSyncFailed); - }, - } - }; + let chain_tip = + if let Some(tip) = latest_chain_tip_opt { tip } else { self.poll_chain_tip().await? 
}; let mut locked_header_cache = self.header_cache.lock().await; let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 9c7ddd817..2cd98e20d 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -14,7 +14,7 @@ use std::sync::{Arc, RwLock}; use std::time::Duration; use bitcoin::{Script, Txid}; -use lightning::chain::Filter; +use lightning::chain::{BestBlock, Filter}; use lightning_block_sync::gossip::UtxoSource; use crate::chain::bitcoind::BitcoindChainSource; @@ -102,7 +102,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let esplora_chain_source = EsploraChainSource::new( server_url, headers, @@ -114,7 +114,7 @@ impl ChainSource { node_metrics, ); let kind = ChainSourceKind::Esplora(esplora_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, None) } pub(crate) fn new_electrum( @@ -122,7 +122,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let electrum_chain_source = ElectrumChainSource::new( server_url, sync_config, @@ -133,15 +133,15 @@ impl ChainSource { node_metrics, ); let kind = ChainSourceKind::Electrum(electrum_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, None) } - pub(crate) fn new_bitcoind_rpc( + pub(crate) async fn new_bitcoind_rpc( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let bitcoind_chain_source = BitcoindChainSource::new_rpc( rpc_host, rpc_port, @@ -153,16 +153,17 @@ impl ChainSource { Arc::clone(&logger), node_metrics, ); + let best_block = 
bitcoind_chain_source.poll_best_block().await.ok(); let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, best_block) } - pub(crate) fn new_bitcoind_rest( + pub(crate) async fn new_bitcoind_rest( rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> (Self, Option) { let bitcoind_chain_source = BitcoindChainSource::new_rest( rpc_host, rpc_port, @@ -175,8 +176,9 @@ impl ChainSource { Arc::clone(&logger), node_metrics, ); + let best_block = bitcoind_chain_source.poll_best_block().await.ok(); let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); - Self { kind, tx_broadcaster, logger } + (Self { kind, tx_broadcaster, logger }, best_block) } pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { From 0a860d46da4ba2d597cbbe139f25e08931fa12da Mon Sep 17 00:00:00 2001 From: coreyphillips Date: Mon, 10 Nov 2025 13:16:20 -0500 Subject: [PATCH 165/184] feat: add configurable BIP39 mnemonic word counts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support generating BIP39 mnemonics with configurable word counts (12, 15, 18, 21, 24). Defaults to 24 words (256-bit entropy) for backward compatibility. 
- Add WordCount enum (12–24 variants) - Update generate_entropy_mnemonic to accept optional word_count - Remove need for entropy_bytes in generate_entropy_mnemonic by passing WordCount enum directly to generate() instead - Add rand feature to bip39 dependency - Extend tests for all word count options and defaults - Expose enum and updated function in UDL bindings --- Cargo.toml | 2 +- bindings/ldk_node.udl | 10 +++++++++- src/io/utils.rs | 44 ++++++++++++++++++++++++++++++++++--------- src/lib.rs | 1 + src/types.rs | 28 +++++++++++++++++++++++++++ 5 files changed, 74 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 544dfca08..2aa147a77 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ reqwest = { version = "0.12", default-features = false, features = ["json", "rus rustls = { version = "0.23", default-features = false } rusqlite = { version = "0.31.0", features = ["bundled"] } bitcoin = "0.32.7" -bip39 = "2.0.0" +bip39 = { version = "2.0.0", features = ["rand"] } bip21 = { version = "0.5", features = ["std"], default-features = false } base64 = { version = "0.22.1", default-features = false, features = ["std"] } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index ab2f483a1..009126feb 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -1,5 +1,5 @@ namespace ldk_node { - Mnemonic generate_entropy_mnemonic(); + Mnemonic generate_entropy_mnemonic(WordCount? 
word_count); Config default_config(); }; @@ -46,6 +46,14 @@ dictionary LSPS2ServiceConfig { u64 max_payment_size_msat; }; +enum WordCount { + "Words12", + "Words15", + "Words18", + "Words21", + "Words24", +}; + enum LogLevel { "Gossip", "Trace", diff --git a/src/io/utils.rs b/src/io/utils.rs index d92c9486b..1b4b02a82 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -47,13 +47,15 @@ use crate::io::{ }; use crate::logger::{log_error, LdkLogger, Logger}; use crate::peer_store::PeerStore; -use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; +use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper, WordCount}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; -/// Generates a random [BIP 39] mnemonic. +/// Generates a random [BIP 39] mnemonic with the specified word count. +/// +/// If no word count is specified, defaults to 24 words (256-bit entropy). /// /// The result may be used to initialize the [`Node`] entropy, i.e., can be given to /// [`Builder::set_entropy_bip39_mnemonic`]. 
@@ -61,11 +63,9 @@ pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_sc /// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki /// [`Node`]: crate::Node /// [`Builder::set_entropy_bip39_mnemonic`]: crate::Builder::set_entropy_bip39_mnemonic -pub fn generate_entropy_mnemonic() -> Mnemonic { - // bip39::Mnemonic supports 256 bit entropy max - let mut entropy = [0; 32]; - OsRng.try_fill_bytes(&mut entropy).expect("Failed to generate entropy"); - Mnemonic::from_entropy(&entropy).unwrap() +pub fn generate_entropy_mnemonic(word_count: Option) -> Mnemonic { + let word_count = word_count.unwrap_or(WordCount::Words24).word_count(); + Mnemonic::generate(word_count).expect("Failed to generate mnemonic") } pub(crate) fn read_or_generate_seed_file( @@ -627,9 +627,35 @@ mod tests { #[test] fn mnemonic_to_entropy_to_mnemonic() { - let mnemonic = generate_entropy_mnemonic(); - + // Test default (24 words) + let mnemonic = generate_entropy_mnemonic(None); let entropy = mnemonic.to_entropy(); assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + assert_eq!(mnemonic.word_count(), 24); + + // Test with different word counts + let word_counts = [ + WordCount::Words12, + WordCount::Words15, + WordCount::Words18, + WordCount::Words21, + WordCount::Words24, + ]; + + for word_count in word_counts { + let mnemonic = generate_entropy_mnemonic(Some(word_count)); + let entropy = mnemonic.to_entropy(); + assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + + // Verify expected word count + let expected_words = match word_count { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + }; + assert_eq!(mnemonic.word_count(), expected_words); + } } } diff --git a/src/lib.rs b/src/lib.rs index 701a14dde..21fc93fe8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -156,6 +156,7 @@ use types::{ }; pub use types::{ ChannelDetails, CustomTlvRecord, 
DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, + WordCount, }; pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, diff --git a/src/types.rs b/src/types.rs index b8dc10b18..6d6bdcd20 100644 --- a/src/types.rs +++ b/src/types.rs @@ -36,6 +36,34 @@ use crate::logger::Logger; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::PaymentDetails; +/// Supported BIP39 mnemonic word counts for entropy generation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum WordCount { + /// 12-word mnemonic (128-bit entropy) + Words12, + /// 15-word mnemonic (160-bit entropy) + Words15, + /// 18-word mnemonic (192-bit entropy) + Words18, + /// 21-word mnemonic (224-bit entropy) + Words21, + /// 24-word mnemonic (256-bit entropy) + Words24, +} + +impl WordCount { + /// Returns the word count as a usize value. + pub fn word_count(&self) -> usize { + match self { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + } + } +} + /// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the /// same time. pub trait SyncAndAsyncKVStore: KVStore + KVStoreSync {} From ba335eb0867d4f693aa128208abb5ca76a38063a Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 22 Sep 2025 09:18:58 -0500 Subject: [PATCH 166/184] Add funding_txo to ChannelReady event When a channel is spliced, the existing funding transaction's output is spent and a new funding transaction output is formed. Once the splice is considered locked by both parties, LDK will emit a ChannelReady event which will include the new funding_txo. Additionally, the initial ChannelReady event now includes the original funding_txo. Include this data in LDK Node's ChannelReady event. 
--- bindings/ldk_node.udl | 2 +- src/event.rs | 44 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index d29f04d02..6c0603af6 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -393,7 +393,7 @@ interface Event { PaymentForwarded(ChannelId prev_channel_id, ChannelId next_channel_id, UserChannelId? prev_user_channel_id, UserChannelId? next_user_channel_id, PublicKey? prev_node_id, PublicKey? next_node_id, u64? total_fee_earned_msat, u64? skimmed_fee_msat, boolean claim_from_onchain_tx, u64? outbound_amount_forwarded_msat); ChannelPending(ChannelId channel_id, UserChannelId user_channel_id, ChannelId former_temporary_channel_id, PublicKey counterparty_node_id, OutPoint funding_txo); - ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id); + ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, OutPoint? funding_txo); ChannelClosed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, ClosureReason? reason); }; diff --git a/src/event.rs b/src/event.rs index a55ddb7fd..375dc97ee 100644 --- a/src/event.rs +++ b/src/event.rs @@ -199,6 +199,10 @@ pub enum Event { funding_txo: OutPoint, }, /// A channel is ready to be used. + /// + /// This event is emitted when: + /// - A new channel has been established and is ready for use + /// - An existing channel has been spliced and is ready with the new funding output ChannelReady { /// The `channel_id` of the channel. channel_id: ChannelId, @@ -208,6 +212,14 @@ pub enum Event { /// /// This will be `None` for events serialized by LDK Node v0.1.0 and prior. counterparty_node_id: Option, + /// The outpoint of the channel's funding transaction. + /// + /// This represents the channel's current funding output, which may change when the + /// channel is spliced. 
For spliced channels, this will contain the new funding output + /// from the confirmed splice transaction. + /// + /// This will be `None` for events serialized by LDK Node v0.6.0 and prior. + funding_txo: Option, }, /// A channel has been closed. ChannelClosed { @@ -246,6 +258,7 @@ impl_writeable_tlv_based_enum!(Event, (0, channel_id, required), (1, counterparty_node_id, option), (2, user_channel_id, required), + (3, funding_txo, option), }, (4, ChannelPending) => { (0, channel_id, required), @@ -1397,14 +1410,28 @@ where } }, LdkEvent::ChannelReady { - channel_id, user_channel_id, counterparty_node_id, .. + channel_id, + user_channel_id, + counterparty_node_id, + funding_txo, + .. } => { - log_info!( - self.logger, - "Channel {} with counterparty {} ready to be used.", - channel_id, - counterparty_node_id, - ); + if let Some(funding_txo) = funding_txo { + log_info!( + self.logger, + "Channel {} with counterparty {} ready to be used with funding_txo {}", + channel_id, + counterparty_node_id, + funding_txo, + ); + } else { + log_info!( + self.logger, + "Channel {} with counterparty {} ready to be used", + channel_id, + counterparty_node_id, + ); + } if let Some(liquidity_source) = self.liquidity_source.as_ref() { liquidity_source @@ -1416,6 +1443,7 @@ where channel_id, user_channel_id: UserChannelId(user_channel_id), counterparty_node_id: Some(counterparty_node_id), + funding_txo, }; match self.event_queue.add_event(event).await { Ok(_) => {}, @@ -1655,6 +1683,7 @@ mod tests { channel_id: ChannelId([23u8; 32]), user_channel_id: UserChannelId(2323), counterparty_node_id: None, + funding_txo: None, }; event_queue.add_event(expected_event.clone()).await.unwrap(); @@ -1692,6 +1721,7 @@ mod tests { channel_id: ChannelId([23u8; 32]), user_channel_id: UserChannelId(2323), counterparty_node_id: None, + funding_txo: None, }; // Check `next_event_async` won't return if the queue is empty and always rather timeout. 
From c585275b4f59082ac9cf4fa7f370d9a4fbf1198c Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Tue, 21 Oct 2025 21:35:17 -0500 Subject: [PATCH 167/184] Add SplicePending and SpliceFailed events LDK introduced similar events with splicing. SplicePending is largely informational like ChannelPending. SpliceFailed indicates the used UTXOs can be reclaimed. This requires UTXO locking, which is not yet implemented. --- bindings/ldk_node.udl | 2 + src/event.rs | 111 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 103 insertions(+), 10 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 6c0603af6..ae2fa7555 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -395,6 +395,8 @@ interface Event { ChannelPending(ChannelId channel_id, UserChannelId user_channel_id, ChannelId former_temporary_channel_id, PublicKey counterparty_node_id, OutPoint funding_txo); ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, OutPoint? funding_txo); ChannelClosed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, ClosureReason? reason); + SplicePending(ChannelId channel_id, UserChannelId user_channel_id, PublicKey counterparty_node_id, OutPoint new_funding_txo); + SpliceFailed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey counterparty_node_id, OutPoint? abandoned_funding_txo); }; enum PaymentFailureReason { diff --git a/src/event.rs b/src/event.rs index 375dc97ee..566265d84 100644 --- a/src/event.rs +++ b/src/event.rs @@ -234,6 +234,28 @@ pub enum Event { /// This will be `None` for events serialized by LDK Node v0.2.1 and prior. reason: Option, }, + /// A channel splice is pending confirmation on-chain. + SplicePending { + /// The `channel_id` of the channel. + channel_id: ChannelId, + /// The `user_channel_id` of the channel. + user_channel_id: UserChannelId, + /// The `node_id` of the channel counterparty.
+ counterparty_node_id: PublicKey, + /// The outpoint of the channel's splice funding transaction. + new_funding_txo: OutPoint, + }, + /// A channel splice has failed. + SpliceFailed { + /// The `channel_id` of the channel. + channel_id: ChannelId, + /// The `user_channel_id` of the channel. + user_channel_id: UserChannelId, + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The outpoint of the channel's splice funding transaction, if one was created. + abandoned_funding_txo: Option, + }, } impl_writeable_tlv_based_enum!(Event, @@ -291,7 +313,19 @@ impl_writeable_tlv_based_enum!(Event, (10, skimmed_fee_msat, option), (12, claim_from_onchain_tx, required), (14, outbound_amount_forwarded_msat, option), - } + }, + (8, SplicePending) => { + (1, channel_id, required), + (3, counterparty_node_id, required), + (5, user_channel_id, required), + (7, new_funding_txo, required), + }, + (9, SpliceFailed) => { + (1, channel_id, required), + (3, counterparty_node_id, required), + (5, user_channel_id, required), + (7, abandoned_funding_txo, option), + }, ); pub struct EventQueue @@ -1645,17 +1679,74 @@ where LdkEvent::FundingTransactionReadyForSigning { .. } => { debug_assert!(false, "We currently don't support interactive-tx, so this event should never be emitted."); }, - LdkEvent::SplicePending { .. } => { - debug_assert!( - false, - "We currently don't support splicing, so this event should never be emitted." + LdkEvent::SplicePending { + channel_id, + user_channel_id, + counterparty_node_id, + new_funding_txo, + .. 
+ } => { + log_info!( + self.logger, + "Channel {} with counterparty {} pending splice with funding_txo {}", + channel_id, + counterparty_node_id, + new_funding_txo, ); + + let event = Event::SplicePending { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + new_funding_txo, + }; + + match self.event_queue.add_event(event).await { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to push to event queue: {}", e); + return Err(ReplayEvent()); + }, + }; }, - LdkEvent::SpliceFailed { .. } => { - debug_assert!( - false, - "We currently don't support splicing, so this event should never be emitted." - ); + LdkEvent::SpliceFailed { + channel_id, + user_channel_id, + counterparty_node_id, + abandoned_funding_txo, + .. + } => { + if let Some(funding_txo) = abandoned_funding_txo { + log_info!( + self.logger, + "Channel {} with counterparty {} failed splice with funding_txo {}", + channel_id, + counterparty_node_id, + funding_txo, + ); + } else { + log_info!( + self.logger, + "Channel {} with counterparty {} failed splice", + channel_id, + counterparty_node_id, + ); + } + + let event = Event::SpliceFailed { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + abandoned_funding_txo, + }; + + match self.event_queue.add_event(event).await { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to push to event queue: {}", e); + return Err(ReplayEvent()); + }, + }; }, } Ok(()) From 16d43cda23a805dda535d31261154460fea9402b Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 23 Oct 2025 12:45:46 -0500 Subject: [PATCH 168/184] Handle LdkEvent::FundingTransactionReadyForSigning When the interactive-tx construction protocol completes in LDK during splicing (and in the future dual-funding), LDK Node must provide signatures for any non-shared inputs belonging to its on-chain wallet. This commit implements this when handling the corresponding FundingTransactionReadyForSigning event. 
--- src/event.rs | 31 +++++++++++++++++++++++++++++-- src/wallet/mod.rs | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/src/event.rs b/src/event.rs index 566265d84..8a1499823 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1676,8 +1676,35 @@ where } } }, - LdkEvent::FundingTransactionReadyForSigning { .. } => { - debug_assert!(false, "We currently don't support interactive-tx, so this event should never be emitted."); + // TODO(splicing): Revisit error handling once splicing API is settled in LDK 0.3 + LdkEvent::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => match self.wallet.sign_owned_inputs(unsigned_transaction) { + Ok(partially_signed_tx) => { + match self.channel_manager.funding_transaction_signed( + &channel_id, + &counterparty_node_id, + partially_signed_tx, + ) { + Ok(()) => { + log_info!( + self.logger, + "Signed funding transaction for channel {} with counterparty {}", + channel_id, + counterparty_node_id + ); + }, + Err(e) => { + // TODO(splicing): Abort splice once supported in LDK 0.3 + debug_assert!(false, "Failed signing funding transaction: {:?}", e); + log_error!(self.logger, "Failed signing funding transaction: {:?}", e); + }, + } + }, + Err(()) => log_error!(self.logger, "Failed signing funding transaction"), }, LdkEvent::SplicePending { channel_id, diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 0f3797431..db2d1cf9d 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -664,6 +664,43 @@ impl Wallet { Ok(address_info.address.script_pubkey()) } + #[allow(deprecated)] + pub(crate) fn sign_owned_inputs(&self, unsigned_tx: Transaction) -> Result { + let locked_wallet = self.inner.lock().unwrap(); + + let mut psbt = Psbt::from_unsigned_tx(unsigned_tx).map_err(|e| { + log_error!(self.logger, "Failed to construct PSBT: {}", e); + })?; + for (i, txin) in psbt.unsigned_tx.input.iter().enumerate() { + if let Some(utxo) = 
locked_wallet.get_utxo(txin.previous_output) { + debug_assert!(!utxo.is_spent); + psbt.inputs[i] = locked_wallet.get_psbt_input(utxo, None, true).map_err(|e| { + log_error!(self.logger, "Failed to construct PSBT input: {}", e); + })?; + } + } + + let mut sign_options = SignOptions::default(); + sign_options.trust_witness_utxo = true; + + match locked_wallet.sign(&mut psbt, sign_options) { + Ok(finalized) => debug_assert!(!finalized), + Err(e) => { + log_error!(self.logger, "Failed to sign owned inputs: {}", e); + return Err(()); + }, + } + + match psbt.extract_tx() { + Ok(tx) => Ok(tx), + Err(bitcoin::psbt::ExtractTxError::MissingInputValue { tx }) => Ok(tx), + Err(e) => { + log_error!(self.logger, "Failed to extract transaction: {}", e); + Err(()) + }, + } + } + #[allow(deprecated)] fn sign_psbt_inner(&self, mut psbt: Psbt) -> Result { let locked_wallet = self.inner.lock().unwrap(); From 14ae7531c1fc3dce3cc5a2b66ae5d9b4b46230d8 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 22 Sep 2025 10:40:22 -0500 Subject: [PATCH 169/184] Refactor funds checking logic into reusable method Extract the funds availability checking logic from open_channel_inner into a separate method so that it can be reused for channel splicing. --- src/lib.rs | 85 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 47 insertions(+), 38 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index cb13d5d9d..b7bc5cb40 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1072,50 +1072,14 @@ impl Node { let con_addr = peer_info.address.clone(); let con_cm = Arc::clone(&self.connection_manager); - let cur_anchor_reserve_sats = - total_anchor_channels_reserve_sats(&self.channel_manager, &self.config); - let spendable_amount_sats = - self.wallet.get_spendable_amount_sats(cur_anchor_reserve_sats).unwrap_or(0); - - // Fail early if we have less than the channel value available. 
- if spendable_amount_sats < channel_amount_sats { - log_error!(self.logger, - "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", - spendable_amount_sats, channel_amount_sats - ); - return Err(Error::InsufficientFunds); - } - // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. self.runtime.block_on(async move { con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; - // Fail if we have less than the channel value + anchor reserve available (if applicable). - let init_features = self - .peer_manager - .peer_by_node_id(&node_id) - .ok_or(Error::ConnectionFailed)? - .init_features; - let required_funds_sats = channel_amount_sats - + self.config.anchor_channels_config.as_ref().map_or(0, |c| { - if init_features.requires_anchors_zero_fee_htlc_tx() - && !c.trusted_peers_no_reserve.contains(&node_id) - { - c.per_channel_reserve_sats - } else { - 0 - } - }); - - if spendable_amount_sats < required_funds_sats { - log_error!(self.logger, - "Unable to create channel due to insufficient funds. 
Available: {}sats, Required: {}sats", - spendable_amount_sats, required_funds_sats - ); - return Err(Error::InsufficientFunds); - } + // Check funds availability after connection (includes anchor reserve calculation) + self.check_sufficient_funds_for_channel(channel_amount_sats, &node_id)?; let mut user_config = default_user_config(&self.config); user_config.channel_handshake_config.announce_for_forwarding = announce_for_forwarding; @@ -1156,6 +1120,51 @@ impl Node { } } + fn check_sufficient_funds_for_channel( + &self, amount_sats: u64, peer_node_id: &PublicKey, + ) -> Result<(), Error> { + let cur_anchor_reserve_sats = + total_anchor_channels_reserve_sats(&self.channel_manager, &self.config); + let spendable_amount_sats = + self.wallet.get_spendable_amount_sats(cur_anchor_reserve_sats).unwrap_or(0); + + // Fail early if we have less than the channel value available. + if spendable_amount_sats < amount_sats { + log_error!(self.logger, + "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", + spendable_amount_sats, amount_sats + ); + return Err(Error::InsufficientFunds); + } + + // Fail if we have less than the channel value + anchor reserve available (if applicable). + let init_features = self + .peer_manager + .peer_by_node_id(peer_node_id) + .ok_or(Error::ConnectionFailed)? + .init_features; + let required_funds_sats = amount_sats + + self.config.anchor_channels_config.as_ref().map_or(0, |c| { + if init_features.requires_anchors_zero_fee_htlc_tx() + && !c.trusted_peers_no_reserve.contains(peer_node_id) + { + c.per_channel_reserve_sats + } else { + 0 + } + }); + + if spendable_amount_sats < required_funds_sats { + log_error!(self.logger, + "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", + spendable_amount_sats, required_funds_sats + ); + return Err(Error::InsufficientFunds); + } + + Ok(()) + } + /// Connect to a node and open a new unannounced channel. 
/// /// To open an announced channel, see [`Node::open_announced_channel`]. From 1a576d07238e7d21b05a6cfa46bd223a1b53489d Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Mon, 22 Sep 2025 12:26:18 -0500 Subject: [PATCH 170/184] Add Node::splice_in method Instead of closing and re-opening a channel when outbound liquidity is exhausted, splicing allows adding more funds (splice-in) while keeping the channel operational. This commit implements splice-in using funds from the BDK on-chain wallet. --- bindings/ldk_node.udl | 3 ++ src/builder.rs | 1 + src/error.rs | 3 ++ src/event.rs | 12 +++++ src/lib.rs | 121 +++++++++++++++++++++++++++++++++++++++++- src/wallet/mod.rs | 74 ++++++++++++++++++++++++-- 6 files changed, 209 insertions(+), 5 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index ae2fa7555..97c808481 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -150,6 +150,8 @@ interface Node { [Throws=NodeError] UserChannelId open_announced_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config); [Throws=NodeError] + void splice_in([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, u64 splice_amount_sats); + [Throws=NodeError] void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); [Throws=NodeError] void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, string? 
reason); @@ -290,6 +292,7 @@ enum NodeError { "ProbeSendingFailed", "ChannelCreationFailed", "ChannelClosingFailed", + "ChannelSplicingFailed", "ChannelConfigUpdateFailed", "PersistenceFailed", "FeerateEstimationUpdateFailed", diff --git a/src/builder.rs b/src/builder.rs index 183c7513b..63e84db37 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1795,6 +1795,7 @@ fn build_with_store_internal( wallet, chain_source, tx_broadcaster, + fee_estimator, event_queue, channel_manager, chain_monitor, diff --git a/src/error.rs b/src/error.rs index 7e9dbac20..20b1cceab 100644 --- a/src/error.rs +++ b/src/error.rs @@ -43,6 +43,8 @@ pub enum Error { ChannelCreationFailed, /// A channel could not be closed. ChannelClosingFailed, + /// A channel could not be spliced. + ChannelSplicingFailed, /// A channel configuration could not be updated. ChannelConfigUpdateFailed, /// Persistence failed. @@ -145,6 +147,7 @@ impl fmt::Display for Error { Self::ProbeSendingFailed => write!(f, "Failed to send the given payment probe."), Self::ChannelCreationFailed => write!(f, "Failed to create channel."), Self::ChannelClosingFailed => write!(f, "Failed to close channel."), + Self::ChannelSplicingFailed => write!(f, "Failed to splice channel."), Self::ChannelConfigUpdateFailed => write!(f, "Failed to update channel config."), Self::PersistenceFailed => write!(f, "Failed to persist data."), Self::FeerateEstimationUpdateFailed => { diff --git a/src/event.rs b/src/event.rs index 8a1499823..41f76f216 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1741,6 +1741,7 @@ where user_channel_id, counterparty_node_id, abandoned_funding_txo, + contributed_outputs, .. 
} => { if let Some(funding_txo) = abandoned_funding_txo { @@ -1760,6 +1761,17 @@ where ); } + let tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![], + output: contributed_outputs, + }; + if let Err(e) = self.wallet.cancel_tx(&tx) { + log_error!(self.logger, "Failed reclaiming unused addresses: {}", e); + return Err(ReplayEvent()); + } + let event = Event::SpliceFailed { channel_id, user_channel_id: UserChannelId(user_channel_id), diff --git a/src/lib.rs b/src/lib.rs index b7bc5cb40..56f2c0da0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -109,6 +109,7 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; +use bitcoin::Amount; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; @@ -124,6 +125,7 @@ pub use error::Error as NodeError; use error::Error; pub use event::Event; use event::{EventHandler, EventQueue}; +use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; #[cfg(feature = "uniffi")] use ffi::*; use gossip::GossipSource; @@ -131,10 +133,12 @@ use graph::NetworkGraph; pub use io::utils::generate_entropy_mnemonic; use io::utils::write_node_metrics; use lightning::chain::BestBlock; -use lightning::events::bump_transaction::Wallet as LdkWallet; +use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; use lightning::impl_writeable_tlv_based; +use lightning::ln::chan_utils::{make_funding_redeemscript, FUNDING_TRANSACTION_WITNESS_WEIGHT}; use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::channelmanager::PaymentId; +use lightning::ln::funding::SpliceContribution; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; use lightning::util::persist::KVStoreSync; @@ -179,6 +183,7 @@ pub struct 
Node { wallet: Arc, chain_source: Arc, tx_broadcaster: Arc, + fee_estimator: Arc, event_queue: Arc>>, channel_manager: Arc, chain_monitor: Arc, @@ -1236,6 +1241,120 @@ impl Node { ) } + /// Add funds from the on-chain wallet into an existing channel. + /// + /// This provides for increasing a channel's outbound liquidity without re-balancing or closing + /// it. Once negotiation with the counterparty is complete, the channel remains operational + /// while waiting for a new funding transaction to confirm. + /// + /// # Experimental API + /// + /// This API is experimental. Currently, a splice-in will be marked as an outbound payment, but + /// this classification may change in the future. + pub fn splice_in( + &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, + splice_amount_sats: u64, + ) -> Result<(), Error> { + let open_channels = + self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); + if let Some(channel_details) = + open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) + { + self.check_sufficient_funds_for_channel(splice_amount_sats, &counterparty_node_id)?; + + const EMPTY_SCRIPT_SIG_WEIGHT: u64 = + 1 /* empty script_sig */ * bitcoin::constants::WITNESS_SCALE_FACTOR as u64; + + // Used for creating a redeem script for the previous funding txo and the new funding + // txo. Only needed when selecting which UTXOs to include in the funding tx that would + // be sufficient to pay for fees. Hence, the value does not matter. 
+ let dummy_pubkey = PublicKey::from_slice(&[2; 33]).unwrap(); + + let funding_txo = channel_details.funding_txo.ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready",); + Error::ChannelSplicingFailed + })?; + + let shared_input = Input { + outpoint: funding_txo.into_bitcoin_outpoint(), + previous_utxo: bitcoin::TxOut { + value: Amount::from_sat(channel_details.channel_value_satoshis), + script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey) + .to_p2wsh(), + }, + satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT, + }; + + let shared_output = bitcoin::TxOut { + value: shared_input.previous_utxo.value + Amount::from_sat(splice_amount_sats), + script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey).to_p2wsh(), + }; + + let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); + + let inputs = self + .wallet + .select_confirmed_utxos(vec![shared_input], &[shared_output], fee_rate) + .map_err(|()| { + log_error!( + self.logger, + "Failed to splice channel: insufficient confirmed UTXOs", + ); + Error::ChannelSplicingFailed + })?; + + let change_address = self.wallet.get_new_internal_address()?; + + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_amount_sats), + inputs, + change_script: Some(change_address.script_pubkey()), + }; + + let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { + Ok(fee_rate) => fee_rate, + Err(_) => { + debug_assert!(false); + fee_estimator::get_fallback_rate_for_target(ConfirmationTarget::ChannelFunding) + }, + }; + + self.channel_manager + .splice_channel( + &channel_details.channel_id, + &counterparty_node_id, + contribution, + funding_feerate_per_kw, + None, + ) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {:?}", e); + let tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: 
bitcoin::absolute::LockTime::ZERO, + input: vec![], + output: vec![bitcoin::TxOut { + value: Amount::ZERO, + script_pubkey: change_address.script_pubkey(), + }], + }; + match self.wallet.cancel_tx(&tx) { + Ok(()) => Error::ChannelSplicingFailed, + Err(e) => e, + } + }) + } else { + log_error!( + self.logger, + "Channel not found for user_channel_id {} and counterparty {}", + user_channel_id, + counterparty_node_id + ); + + Err(Error::ChannelSplicingFailed) + } + } + /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. /// diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index db2d1cf9d..e26f87e90 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -6,11 +6,13 @@ // accordance with one or both of these licenses. use std::future::Future; +use std::ops::Deref; use std::pin::Pin; use std::str::FromStr; use std::sync::{Arc, Mutex}; use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; +use bdk_wallet::descriptor::ExtendedDescriptor; #[allow(deprecated)] use bdk_wallet::SignOptions; use bdk_wallet::{Balance, KeychainKind, PersistedWallet, Update}; @@ -19,19 +21,20 @@ use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::hashes::Hash; use bitcoin::key::XOnlyPublicKey; -use bitcoin::psbt::Psbt; +use bitcoin::psbt::{self, Psbt}; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey}; use bitcoin::{ - Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, + Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, WitnessProgram, WitnessVersion, }; use lightning::chain::chaininterface::BroadcasterInterface; use lightning::chain::channelmonitor::ANTI_REORG_DELAY; use lightning::chain::{BestBlock, Listen}; -use lightning::events::bump_transaction::{Utxo, 
WalletSource}; +use lightning::events::bump_transaction::{Input, Utxo, WalletSource}; use lightning::ln::channelmanager::PaymentId; +use lightning::ln::funding::FundingTxInput; use lightning::ln::inbound_payment::ExpandedKey; use lightning::ln::msgs::UnsignedGossipMessage; use lightning::ln::script::ShutdownScript; @@ -285,7 +288,7 @@ impl Wallet { Ok(address_info.address) } - fn get_new_internal_address(&self) -> Result { + pub(crate) fn get_new_internal_address(&self) -> Result { let mut locked_wallet = self.inner.lock().unwrap(); let mut locked_persister = self.persister.lock().unwrap(); @@ -297,6 +300,19 @@ impl Wallet { Ok(address_info.address) } + pub(crate) fn cancel_tx(&self, tx: &Transaction) -> Result<(), Error> { + let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_persister = self.persister.lock().unwrap(); + + locked_wallet.cancel_tx(tx); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + Ok(()) + } + pub(crate) fn get_balances( &self, total_anchor_channels_reserve_sats: u64, ) -> Result<(u64, u64), Error> { @@ -559,6 +575,56 @@ impl Wallet { Ok(txid) } + pub(crate) fn select_confirmed_utxos( + &self, must_spend: Vec, must_pay_to: &[TxOut], fee_rate: FeeRate, + ) -> Result, ()> { + let mut locked_wallet = self.inner.lock().unwrap(); + debug_assert!(matches!( + locked_wallet.public_descriptor(KeychainKind::External), + ExtendedDescriptor::Wpkh(_) + )); + debug_assert!(matches!( + locked_wallet.public_descriptor(KeychainKind::Internal), + ExtendedDescriptor::Wpkh(_) + )); + + let mut tx_builder = locked_wallet.build_tx(); + tx_builder.only_witness_utxo(); + + for input in &must_spend { + let psbt_input = psbt::Input { + witness_utxo: Some(input.previous_utxo.clone()), + ..Default::default() + }; + let weight = Weight::from_wu(input.satisfaction_weight); + tx_builder.add_foreign_utxo(input.outpoint, psbt_input, 
weight).map_err(|_| ())?; + } + + for output in must_pay_to { + tx_builder.add_recipient(output.script_pubkey.clone(), output.value); + } + + tx_builder.fee_rate(fee_rate); + tx_builder.exclude_unconfirmed(); + + tx_builder + .finish() + .map_err(|e| { + log_error!(self.logger, "Failed to select confirmed UTXOs: {}", e); + })? + .unsigned_tx + .input + .iter() + .filter(|txin| must_spend.iter().all(|input| input.outpoint != txin.previous_output)) + .filter_map(|txin| { + locked_wallet + .tx_details(txin.previous_output.txid) + .map(|tx_details| tx_details.tx.deref().clone()) + .map(|prevtx| FundingTxInput::new_p2wpkh(prevtx, txin.previous_output.vout)) + }) + .collect::, ()>>() + } + fn list_confirmed_utxos_inner(&self) -> Result, ()> { let locked_wallet = self.inner.lock().unwrap(); let mut utxos = Vec::new(); From 04d1c3952355c2ed997bdd491d91f6bc9cd0b5a6 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 23 Oct 2025 16:04:46 -0500 Subject: [PATCH 171/184] Add Node::splice_out method Instead of closing and re-opening a channel when on-chain funds are needed, splicing allows removing funds (splice-out) while keeping the channel operational. This commit implements splice-out sending funds to a user-provided on-chain address. 
--- bindings/ldk_node.udl | 2 ++ src/lib.rs | 68 ++++++++++++++++++++++++++++++++++++++++++- src/wallet/mod.rs | 10 +++---- 3 files changed, 73 insertions(+), 7 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 97c808481..ff2469c7e 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -152,6 +152,8 @@ interface Node { [Throws=NodeError] void splice_in([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, u64 splice_amount_sats); [Throws=NodeError] + void splice_out([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, [ByRef]Address address, u64 splice_amount_sats); + [Throws=NodeError] void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); [Throws=NodeError] void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, string? reason); diff --git a/src/lib.rs b/src/lib.rs index 56f2c0da0..8ac6780ed 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -109,7 +109,7 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; -use bitcoin::Amount; +use bitcoin::{Address, Amount}; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; @@ -1355,6 +1355,72 @@ impl Node { } } + /// Remove funds from an existing channel, sending them to an on-chain address. + /// + /// This provides for decreasing a channel's outbound liquidity without re-balancing or closing + /// it. Once negotiation with the counterparty is complete, the channel remains operational + /// while waiting for a new funding transaction to confirm. + /// + /// # Experimental API + /// + /// This API is experimental. Currently, a splice-out will be marked as an inbound payment if + /// paid to an address associated with the on-chain wallet, but this classification may change + /// in the future. 
+ pub fn splice_out( + &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, address: &Address, + splice_amount_sats: u64, + ) -> Result<(), Error> { + let open_channels = + self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); + if let Some(channel_details) = + open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) + { + if splice_amount_sats > channel_details.outbound_capacity_msat { + return Err(Error::ChannelSplicingFailed); + } + + self.wallet.parse_and_validate_address(address)?; + + let contribution = SpliceContribution::SpliceOut { + outputs: vec![bitcoin::TxOut { + value: Amount::from_sat(splice_amount_sats), + script_pubkey: address.script_pubkey(), + }], + }; + + let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); + let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { + Ok(fee_rate) => fee_rate, + Err(_) => { + debug_assert!(false, "FeeRate should always fit within u32"); + log_error!(self.logger, "FeeRate should always fit within u32"); + fee_estimator::get_fallback_rate_for_target(ConfirmationTarget::ChannelFunding) + }, + }; + + self.channel_manager + .splice_channel( + &channel_details.channel_id, + &counterparty_node_id, + contribution, + funding_feerate_per_kw, + None, + ) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {:?}", e); + Error::ChannelSplicingFailed + }) + } else { + log_error!( + self.logger, + "Channel not found for user_channel_id {} and counterparty {}", + user_channel_id, + counterparty_node_id + ); + Err(Error::ChannelSplicingFailed) + } + } + /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. 
/// diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index e26f87e90..2f8daa500 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -26,7 +26,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey}; use bitcoin::{ - Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, + Address, Amount, FeeRate, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, WitnessProgram, WitnessVersion, }; use lightning::chain::chaininterface::BroadcasterInterface; @@ -348,12 +348,10 @@ impl Wallet { self.get_balances(total_anchor_channels_reserve_sats).map(|(_, s)| s) } - fn parse_and_validate_address( - &self, network: Network, address: &Address, - ) -> Result { + pub(crate) fn parse_and_validate_address(&self, address: &Address) -> Result { Address::::from_str(address.to_string().as_str()) .map_err(|_| Error::InvalidAddress)? - .require_network(network) + .require_network(self.config.network) .map_err(|_| Error::InvalidAddress) } @@ -362,7 +360,7 @@ impl Wallet { &self, address: &bitcoin::Address, send_amount: OnchainSendAmount, fee_rate: Option, ) -> Result { - self.parse_and_validate_address(self.config.network, &address)?; + self.parse_and_validate_address(&address)?; // Use the set fee_rate or default to fee estimation. let confirmation_target = ConfirmationTarget::OnchainPayment; From a5c9cbf63dcb02e51b1efdc0ae53e0b0c777a5d9 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 29 Oct 2025 12:42:46 -0500 Subject: [PATCH 172/184] Accept inbound splice attempts Since LDK Node does not support downgrades, there's no need to have a Config parameter for accepting inbound splices. Instead, enable it by default. 
--- src/config.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/config.rs b/src/config.rs index ce361c45a..510bcc875 100644 --- a/src/config.rs +++ b/src/config.rs @@ -325,6 +325,7 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { user_config.manually_accept_inbound_channels = true; user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); + user_config.reject_inbound_splices = false; if may_announce_channel(config).is_err() { user_config.accept_forwards_to_priv_channels = false; From d9ebd344ea075d3427813ddc732b0beb9c051c27 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 23 Oct 2025 13:03:36 -0500 Subject: [PATCH 173/184] Add an integration test for splicing --- tests/integration_tests_rust.rs | 145 ++++++++++++++++++++++++++++++++ 1 file changed, 145 insertions(+) diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 69df12710..2d487da06 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -925,6 +925,151 @@ async fn concurrent_connections_succeed() { } } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn splice_channel() { + macro_rules! expect_splice_pending_event { + ($node: expr, $counterparty_node_id: expr) => {{ + match $node.next_event_async().await { + ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. 
} => { + println!("{} got event {:?}", $node.node_id(), e); + assert_eq!(counterparty_node_id, $counterparty_node_id); + $node.event_handled().unwrap(); + new_funding_txo + }, + ref e => { + panic!("{} got unexpected event!: {:?}", std::stringify!($node), e); + }, + } + }}; + } + + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let address_b = node_b.onchain_payment().new_address().unwrap(); + let premine_amount_sat = 5_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a, address_b], + Amount::from_sat(premine_amount_sat), + ) + .await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + assert_eq!(node_a.list_balances().total_onchain_balance_sats, premine_amount_sat); + assert_eq!(node_b.list_balances().total_onchain_balance_sats, premine_amount_sat); + + open_channel(&node_a, &node_b, 4_000_000, false, &electrsd).await; + + // Open a channel with Node A contributing the funding + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id()); + let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id()); + + let opening_transaction_fee_sat = 156; + let closing_transaction_fee_sat = 614; + let anchor_output_sat = 330; + + assert_eq!( + node_a.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - opening_transaction_fee_sat + ); + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 0); + + // Test that splicing and payments fail when there 
are insufficient funds + let address = node_b.onchain_payment().new_address().unwrap(); + let amount_msat = 400_000_000; + + assert_eq!( + node_b.splice_in(&user_channel_id_b, node_b.node_id(), 5_000_000), + Err(NodeError::ChannelSplicingFailed), + ); + assert_eq!( + node_b.splice_out(&user_channel_id_b, node_b.node_id(), &address, amount_msat / 1000), + Err(NodeError::ChannelSplicingFailed), + ); + assert_eq!( + node_b.spontaneous_payment().send(amount_msat, node_a.node_id(), None), + Err(NodeError::PaymentSendingFailed) + ); + + // Splice-in funds for Node B so that it has outbound liquidity to make a payment + node_b.splice_in(&user_channel_id_b, node_a.node_id(), 4_000_000).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let splice_in_fee_sat = 252; + + assert_eq!( + node_b.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - splice_in_fee_sat + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000); + + let payment_id = + node_b.spontaneous_payment().send(amount_msat, node_a.node_id(), None).unwrap(); + + expect_payment_successful_event!(node_b, Some(payment_id), None); + expect_payment_received_event!(node_a, amount_msat); + + // Mine a block to give time for the HTLC to resolve + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; + + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat + amount_msat / 1000 + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000 - amount_msat / 1000); + + // Splice-out funds for Node A from the payment sent by Node B + 
let address = node_a.onchain_payment().new_address().unwrap(); + node_a.splice_out(&user_channel_id_a, node_b.node_id(), &address, amount_msat / 1000).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let splice_out_fee_sat = 183; + + assert_eq!( + node_a.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - opening_transaction_fee_sat + amount_msat / 1000 + ); + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat - splice_out_fee_sat + ); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); From 204e04d80fccd3d7d6f6a0df33850128f4342d83 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 29 Oct 2025 17:18:33 -0500 Subject: [PATCH 174/184] Test splicing in do_channel_full_cycle --- tests/common/mod.rs | 78 ++++++++++++++++++++++++++++++--- tests/integration_tests_rust.rs | 24 ++-------- 2 files changed, 76 insertions(+), 26 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index f023da680..699f8f1d0 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -99,6 +99,24 @@ macro_rules! expect_channel_ready_event { pub(crate) use expect_channel_ready_event; +macro_rules! expect_splice_pending_event { + ($node: expr, $counterparty_node_id: expr) => {{ + match $node.next_event_async().await { + ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. 
} => { + println!("{} got event {:?}", $node.node_id(), e); + assert_eq!(counterparty_node_id, $counterparty_node_id); + $node.event_handled().unwrap(); + new_funding_txo + }, + ref e => { + panic!("{} got unexpected event!: {:?}", std::stringify!($node), e); + }, + } + }}; +} + +pub(crate) use expect_splice_pending_event; + macro_rules! expect_payment_received_event { ($node:expr, $amount_msat:expr) => {{ match $node.next_event_async().await { @@ -795,8 +813,8 @@ pub(crate) async fn do_channel_full_cycle( node_b_anchor_reserve_sat ); - let user_channel_id = expect_channel_ready_event!(node_a, node_b.node_id()); - expect_channel_ready_event!(node_b, node_a.node_id()); + let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id()); + let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id()); println!("\nB receive"); let invoice_amount_1_msat = 2500_000; @@ -1085,12 +1103,60 @@ pub(crate) async fn do_channel_full_cycle( 1 ); + // Mine a block to give time for the HTLC to resolve + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; + + println!("\nB splices out to pay A"); + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let splice_out_sat = funding_amount_sat / 2; + node_b.splice_out(&user_channel_id_b, node_a.node_id(), &addr_a, splice_out_sat).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + assert_eq!( + node_a + .list_payments_with_filter(|p| p.direction == PaymentDirection::Inbound + && matches!(p.kind, PaymentKind::Onchain { .. 
})) + .len(), + 2 + ); + + println!("\nA splices in the splice-out payment from B"); + let splice_in_sat = splice_out_sat; + node_a.splice_in(&user_channel_id_a, node_b.node_id(), splice_in_sat).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + assert_eq!( + node_a + .list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound + && matches!(p.kind, PaymentKind::Onchain { .. })) + .len(), + 2 + ); + println!("\nB close_channel (force: {})", force_close); if force_close { tokio::time::sleep(Duration::from_secs(1)).await; - node_a.force_close_channel(&user_channel_id, node_b.node_id(), None).unwrap(); + node_a.force_close_channel(&user_channel_id_a, node_b.node_id(), None).unwrap(); } else { - node_a.close_channel(&user_channel_id, node_b.node_id()).unwrap(); + node_a.close_channel(&user_channel_id_a, node_b.node_id()).unwrap(); } expect_event!(node_a, ChannelClosed); @@ -1189,7 +1255,7 @@ pub(crate) async fn do_channel_full_cycle( + invoice_amount_3_msat + determined_amount_msat + keysend_amount_msat) - / 1000; + / 1000 - splice_out_sat; let node_a_upper_bound_sat = (premine_amount_sat - funding_amount_sat) + (funding_amount_sat - sum_of_all_payments_sat); let node_a_lower_bound_sat = node_a_upper_bound_sat - onchain_fee_buffer_sat; @@ -1210,7 +1276,7 @@ pub(crate) async fn do_channel_full_cycle( .list_payments_with_filter(|p| p.direction == PaymentDirection::Inbound && matches!(p.kind, PaymentKind::Onchain { .. 
})) .len(), - 2 + 3 ); assert_eq!( node_b diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 2d487da06..d6c7c9447 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -20,10 +20,10 @@ use common::{ bump_fee_and_broadcast, distribute_funds_unconfirmed, do_channel_full_cycle, expect_channel_pending_event, expect_channel_ready_event, expect_event, expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, - generate_blocks_and_wait, open_channel, open_channel_push_amt, premine_and_distribute_funds, - premine_blocks, prepare_rbf, random_config, random_listening_addresses, - setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_node_for_async_payments, - setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, + expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt, + premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, + random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, + setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, }; use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::liquidity::LSPS2ServiceConfig; @@ -927,22 +927,6 @@ async fn concurrent_connections_succeed() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn splice_channel() { - macro_rules! expect_splice_pending_event { - ($node: expr, $counterparty_node_id: expr) => {{ - match $node.next_event_async().await { - ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. 
} => { - println!("{} got event {:?}", $node.node_id(), e); - assert_eq!(counterparty_node_id, $counterparty_node_id); - $node.event_handled().unwrap(); - new_funding_txo - }, - ref e => { - panic!("{} got unexpected event!: {:?}", std::stringify!($node), e); - }, - } - }}; - } - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); From 998c69e822483e129e20d0e6aac20220a41d9997 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 19 Nov 2025 09:18:55 +0100 Subject: [PATCH 175/184] Expand docs on `LSPS2ServiceConfig::client_trusts_lsp` field Previously the docs have been a bit sparse. Now that we actually implement the client-trusts-LSP flow, we should expand a bit on what the bool actually does. --- src/liquidity.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/liquidity.rs b/src/liquidity.rs index ee520e14d..74e6098dd 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -130,7 +130,18 @@ pub struct LSPS2ServiceConfig { pub min_payment_size_msat: u64, /// The maximum payment size that we will accept when opening a channel. pub max_payment_size_msat: u64, - /// Use the client trusts lsp model + /// Use the 'client-trusts-LSP' trust model. + /// + /// When set, the service will delay *broadcasting* the JIT channel's funding transaction until + /// the client has claimed sufficient HTLC parts to pay for the channel open. + /// + /// Note this will render the flow incompatible with clients utilizing the 'LSP-trusts-client' + /// trust model, i.e., clients that in turn delay *claiming* any HTLCs until they see the funding + /// transaction in the mempool. + /// + /// Please refer to [`bLIP-52`] for more information. 
+ /// + /// [`bLIP-52`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models pub client_trusts_lsp: bool, } From fe14855a07c11b4794643d84cf82d29939399652 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 19 Nov 2025 11:16:55 +0100 Subject: [PATCH 176/184] Add draft changelog for LDK Node v0.7.0 --- CHANGELOG.md | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 05813b621..d03401d85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,56 @@ +# 0.7.0 - TODO +This seventh minor release introduces numerous new features, bug fixes, and API improvements. In particular, it adds support for channel Splicing, Async Payments, as well as sourcing chain data from a Bitcoin Core REST backend. + +## Feature and API updates +- Experimental support for channel splicing has been added. (#677) + - **Note**: Splicing-related transactions might currently still get misclassified in the payment store. +- Support for serving and paying static invoices for Async Payments has been added. (#621, #632) +- Sourcing chain data via Bitcoin Core's REST interface is now supported. (#526) +- A new `Builder::set_chain_source_esplora_with_headers` method has been added + that allows specifying headers to be sent to the Esplora backend. (#596) +- The ability to import and merge pathfinding scores has been added. (#449) +- Passing a custom pre-image when sending spontaneous payments is now supported. (#549) +- When running in the context of a `tokio` runtime, we now attempt to reuse the + outer runtime context for our main runtime. (#543) +- Specifying a `RouteParametersConfig` when paying BOLT12 offers or sending refunds is now supported. (#702) +- Liquidity service data is now persisted across restarts. (#650) +- The bLIP-52/LSPS2 service now supports the 'client-trusts-LSP' model. (#687) +- The manual-claiming flow is now also supported for JIT invoices. 
(#608) +- Any key-value stores provided to `Builder::build_with_store` are now + required to implement LDK's `KVStore` as well as `KVStoreSync` interfaces. + (#633) +- The `generate_entropy_mnemonic` method now supports specifying a word count. (#699) + +## Bug Fixes and Improvements +- Robustness of the shutdown procedure has been improved, minimizing risk of blocking during `Node::stop`. (#592, #612, #619, #622) +- The VSS storage backend now supports 'lazy' deletes, allowing it to avoid unnecessary remote calls for certain operations. (#689) +- The encryption and obfuscation scheme used when storing data against a VSS backend has been improved. (#627) +- Transient errors during `bitcoind` RPC chain synchronization are now retried with an exponential back-off. (#588) +- Transactions evicted from the mempool are now correctly handled when syncing via `bitcoind` RPC/REST. (#605) +- When sourcing chain data from a Bitcoin Core backend, we now poll for the + current tip in `Builder::build`, avoiding re-validating the chain from + genesis on first startup. (#706) +- A bug that could result in the node hanging on shutdown when sourcing chain data from a Bitcoin Core backend has been fixed. (#682) +- Unnecessary fee estimation calls to Bitcoin Core RPC are now avoided. (#631) +- The node now persists differential updates instead of re-persisting full channel monitor, reducing IO load. (#661) +- The previously rather restrictive `MaximumFeeEstimate` was relaxed. (#629) +- The node now listens on all provided listening addresses. (#644) + +## Compatibility Notes +- The minimum supported Rust version (MSRV) has been bumped to `rustc` v1.85 (#606) +- The LDK dependency has been bumped to v0.2. +- The BDK dependency has been bumped to v2.2. (#656) +- The VSS client dependency has been updated to utilize the new `vss-client-ng` crate v0.4. (#627) +- The `rust-bitcoin` dependency has been bumped to v0.32.7. (#656) +- The `uniffi` dependency has been bumped to v0.28.3. 
(#591) +- The `electrum-client` dependency has been bumped to v0.24.0. (#602) +- For Kotlin/Android builds we now require 16kb page sizes, ensuring Play Store compatibility. (#625) + +In total, this release features TODO files changed, TODO insertions, TODO +deletions in TODO commits from TODO authors in alphabetical order: + +- TODO TODO + # 0.6.2 - Aug. 14, 2025 This patch release fixes a panic that could have been hit when syncing to a TLS-enabled Electrum server, as well as some minor issues when shutting down From ea2cd3ef07b0d2c71cbe9f9b95d2be589a4015b0 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Fri, 21 Nov 2025 12:52:09 +0700 Subject: [PATCH 177/184] fix: compile errors --- bindings/ldk_node.udl | 2 +- src/lib.rs | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index e44ba51cf..547d045fc 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -245,7 +245,7 @@ interface Bolt12Payment { interface SpontaneousPayment { // Alby: custom TLV & preimage (TODO: update to use send_with_preimage_and_custom_tlvs) [Throws=NodeError] - PaymentId send_with_tlvs_and_preimage(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters, sequence custom_tlvs, PaymentPreimage? preimage); + PaymentId send_with_tlvs_and_preimage(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters, sequence custom_tlvs, PaymentPreimage? preimage); //PaymentId send(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters); //[Throws=NodeError] //PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, RouteParametersConfig? 
route_parameters, sequence custom_tlvs); diff --git a/src/lib.rs b/src/lib.rs index e99719f70..583aa9fae 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -127,8 +127,14 @@ use chain::ChainSource; use config::{ //default_user_config, may_announce_channel, ChannelConfig, Config, //BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, - default_user_config, may_announce_channel, AsyncPaymentsRole, ChannelConfig, Config, - NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, + default_user_config, + may_announce_channel, + AsyncPaymentsRole, + ChannelConfig, + Config, + NODE_ANN_BCAST_INTERVAL, + PEER_RECONNECTION_INTERVAL, + RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; pub use error::Error as NodeError; @@ -342,7 +348,7 @@ impl Node { } } } - }, runtime_handle); + }); } if let Some(pathfinding_scores_sync_url) = self.pathfinding_scores_sync_url.as_ref() { @@ -507,7 +513,7 @@ impl Node { } } } - }, runtime_handle); + }); // Regularly broadcast node announcements. let bcast_cm = Arc::clone(&self.channel_manager); @@ -590,7 +596,7 @@ impl Node { } } } - }, runtime_handle); + }); } let stop_tx_bcast = self.stop_sender.subscribe(); @@ -709,9 +715,7 @@ impl Node { } } } - }, - runtime_handle, - ); + }); } log_info!(self.logger, "Startup complete."); @@ -1538,6 +1542,7 @@ impl Node { }, ) }); + } /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. 
From 8870dff63a8f160ddcb542e3de68953e8f9b503e Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Fri, 21 Nov 2025 13:13:07 +0700 Subject: [PATCH 178/184] fix: compile errors --- src/lib.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 583aa9fae..64d738774 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1704,9 +1704,9 @@ impl Node { /// Alby: Return encoded channel monitors for a recovery of last resort pub fn get_encoded_channel_monitors(&self) -> Result, Error> { - let channel_monitor_store = Arc::clone(&self.kv_store); + let channel_monitor_store: &dyn KVStoreSync = &*self.kv_store; let channel_monitor_logger = Arc::clone(&self.logger); - let keys = channel_monitor_store.list("monitors", "").map_err(|e| { + let keys = KVStoreSync::list(channel_monitor_store, "monitors", "").map_err(|e| { log_error!(channel_monitor_logger, "Failed to get monitor keys: {}", e); Error::ConnectionFailed })?; @@ -1714,10 +1714,11 @@ impl Node { let mut entries = Vec::new(); for key in keys { - let value = channel_monitor_store.read("monitors", "", &key).map_err(|e| { - log_error!(channel_monitor_logger, "Failed to get monitor value: {}", e); - Error::ConnectionFailed - })?; + let value = + KVStoreSync::read(channel_monitor_store, "monitors", "", &key).map_err(|e| { + log_error!(channel_monitor_logger, "Failed to get monitor value: {}", e); + Error::ConnectionFailed + })?; entries.push(KeyValue { key, value }) } @@ -1739,12 +1740,13 @@ impl Node { let mut total_lightning_balance_sats = 0; let mut lightning_balances = Vec::new(); - for channel_id in self.chain_monitor.list_monitors() { + for (channel_id) in self.chain_monitor.list_monitors() { match self.chain_monitor.get_monitor(channel_id) { Ok(monitor) => { funding_txo_by_channel_id.insert(channel_id, funding_txo); let counterparty_node_id = monitor.get_counterparty_node_id(); + let funding_txo = monitor.get_funding_txo(); for ldk_balance in 
monitor.get_claimable_balances() { total_lightning_balance_sats += ldk_balance.claimable_amount_satoshis(); lightning_balances.push(LightningBalance::from_ldk_balance( @@ -1770,9 +1772,9 @@ impl Node { // by LDK for a while (4032 blocks since balances become empty), so we can still try to access it. // See [`periodically_archive_fully_resolved_monitors`] for details. let funding_txo = - out.channel_id.and_then(|c| funding_txo_by_channel_id.get(&c)).cloned(); + out.channel_id.and_then(|c| funding_txo_by_channel_id.get(&c).cloned()); let chmon = funding_txo.and_then(|txo| self.chain_monitor.get_monitor(txo).ok()); - let counterparty_node_id = chmon.and_then(|m| m.get_counterparty_node_id()); + let counterparty_node_id = chmon.and_then(|m| Some(m.get_counterparty_node_id())); PendingSweepBalance::from_tracked_spendable_output( out, counterparty_node_id, From cea9036ae9777617d6c9909cae18776d69e8295f Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Fri, 21 Nov 2025 14:19:02 +0700 Subject: [PATCH 179/184] fix: more errors --- src/balance.rs | 28 ++++----- src/builder.rs | 35 +++++++---- src/event.rs | 4 +- src/io/sqlite_store/mod.rs | 25 ++++---- src/io/vss_store.rs | 125 +++++++++++++++++++++++-------------- src/lib.rs | 31 +++------ src/payment/spontaneous.rs | 12 ++-- src/payment/store.rs | 2 +- 8 files changed, 144 insertions(+), 118 deletions(-) diff --git a/src/balance.rs b/src/balance.rs index 7a47a1509..6de65bb85 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -6,7 +6,7 @@ // accordance with one or both of these licenses. use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, BlockHash, Txid}; +use bitcoin::{Amount, BlockHash, OutPoint, Txid}; use lightning::chain::channelmonitor::{Balance as LdkBalance, BalanceSource}; use lightning::ln::types::ChannelId; use lightning::sign::SpendableOutputDescriptor; @@ -80,7 +80,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. 
- funding_tx_index: u16, + funding_tx_index: u32, /// The amount available to claim, in satoshis, excluding the on-chain fees which will be /// required to do so. amount_satoshis: u64, @@ -138,7 +138,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u16, + funding_tx_index: u32, /// The amount available to claim, in satoshis, possibly excluding the on-chain fees which /// were spent in broadcasting the transaction. amount_satoshis: u64, @@ -165,7 +165,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u16, + funding_tx_index: u32, /// The amount available to claim, in satoshis, excluding the on-chain fees which will be /// required to do so. amount_satoshis: u64, @@ -188,7 +188,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u16, + funding_tx_index: u32, /// The amount potentially available to claim, in satoshis, excluding the on-chain fees /// which will be required to do so. amount_satoshis: u64, @@ -211,7 +211,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u16, + funding_tx_index: u32, /// The amount potentially available to claim, in satoshis, excluding the on-chain fees /// which will be required to do so. amount_satoshis: u64, @@ -234,7 +234,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u16, + funding_tx_index: u32, /// The amount, in satoshis, of the output which we can claim. 
amount_satoshis: u64, }, @@ -245,7 +245,7 @@ impl LightningBalance { channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, balance: LdkBalance, ) -> Self { - let OutPoint { txid: funding_tx_id, index: funding_tx_index } = funding_txo; + let OutPoint { txid: funding_tx_id, vout: funding_tx_index } = funding_txo; match balance { LdkBalance::ClaimableOnChannelClose { balance_candidates, @@ -355,7 +355,7 @@ pub enum PendingSweepBalance { /// Alby: funding transaction ID. funding_tx_id: Option, /// Alby: funding transaction output index. - funding_tx_index: Option, + funding_tx_index: Option, }, /// A spending transaction has been generated and broadcast and is awaiting confirmation /// on-chain. @@ -373,7 +373,7 @@ pub enum PendingSweepBalance { /// Alby: funding transaction ID. funding_tx_id: Option, /// Alby: funding transaction output index. - funding_tx_index: Option, + funding_tx_index: Option, }, /// A spending transaction has been confirmed on-chain and is awaiting threshold confirmations. /// @@ -396,7 +396,7 @@ pub enum PendingSweepBalance { /// Alby: funding transaction ID. funding_tx_id: Option, /// Alby: funding transaction output index. 
- funding_tx_index: Option, + funding_tx_index: Option, }, } @@ -414,7 +414,7 @@ impl PendingSweepBalance { amount_satoshis, counterparty_node_id, funding_tx_id: funding_txo.map(|funding_txo| funding_txo.txid), - funding_tx_index: funding_txo.map(|funding_txo| funding_txo.index), + funding_tx_index: funding_txo.map(|funding_txo| funding_txo.vout), } }, OutputSpendStatus::PendingFirstConfirmation { @@ -432,7 +432,7 @@ impl PendingSweepBalance { amount_satoshis, counterparty_node_id, funding_tx_id: funding_txo.map(|funding_txo| funding_txo.txid), - funding_tx_index: funding_txo.map(|funding_txo| funding_txo.index), + funding_tx_index: funding_txo.map(|funding_txo| funding_txo.vout), } }, OutputSpendStatus::PendingThresholdConfirmations { @@ -452,7 +452,7 @@ impl PendingSweepBalance { amount_satoshis, counterparty_node_id, funding_tx_id: funding_txo.map(|funding_txo| funding_txo.txid), - funding_tx_index: funding_txo.map(|funding_txo| funding_txo.index), + funding_tx_index: funding_txo.map(|funding_txo| funding_txo.vout), } }, } diff --git a/src/builder.rs b/src/builder.rs index 1e5fe805a..4997d1ed8 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -8,7 +8,7 @@ use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex, Once, RwLock}; use std::time::SystemTime; use std::{fmt, fs}; @@ -34,8 +34,13 @@ use lightning::routing::scoring::{ use lightning::sign::{EntropySource, NodeSigner}; use lightning::util::persist::{ KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, 
SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; + use lightning::util::ser::ReadableArgs; use lightning::util::sweep::OutputSweeper; use lightning_persister::fs_store::FilesystemStore; @@ -65,19 +70,21 @@ use crate::io::{ use crate::liquidity::{ LSPS1ClientConfig, LSPS2ClientConfig, LSPS2ServiceConfig, LiquiditySourceBuilder, }; -use crate::logger::{log_error, LdkLogger, LogLevel, LogWriter, Logger}; +use crate::logger::{log_error, log_info, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; use crate::peer_store::PeerStore; use crate::runtime::Runtime; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ - ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeysManager, MessageRouter, - OnionMessenger, PaymentStore, PeerManager, Persister, + ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeyValue, KeysManager, + MessageRouter, MigrateStorage, OnionMessenger, PaymentStore, PeerManager, Persister, + ResetState, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; use crate::{Node, NodeMetrics}; +use chrono::Local; const VSS_HARDENED_CHILD_INDEX: u32 = 877; const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138; @@ -252,7 +259,7 @@ pub struct NodeBuilder { chain_data_source_config: Option, gossip_source_config: Option, liquidity_source_config: Option, - monitors_to_restore: Option>, + monitors_to_restore: Option>, // Alby: for hub recovery with SCB backup file reset_state: Option, migrate_storage: Option, log_writer_config: Option, @@ -618,8 +625,9 @@ impl NodeBuilder { let storage_dir_path = self.config.storage_dir_path.clone(); fs::create_dir_all(storage_dir_path.clone()) .map_err(|_| BuildError::StoragePathAccessFailed)?; - let sql_store_config = - SqliteStoreConfig { transient_graph: self.config.transient_network_graph }; + let sql_store_config = 
io::sqlite_store::SqliteStoreConfig { + transient_graph: self.config.transient_network_graph, + }; let kv_store = Arc::new( SqliteStore::with_config( storage_dir_path.into(), @@ -805,13 +813,14 @@ impl NodeBuilder { Some(io::sqlite_store::KV_TABLE_NAME.to_string()), ) .map_err(|_| BuildError::KVStoreSetupFailed)?, - ) as Arc); + ) as Arc); } // Alby: use a secondary KV store for non-essential data (not needed by VSS) let storage_dir_path = config.storage_dir_path.clone(); - let sql_store_config = - SqliteStoreConfig { transient_graph: self.config.transient_network_graph }; + let sql_store_config = io::sqlite_store::SqliteStoreConfig { + transient_graph: self.config.transient_network_graph, + }; let secondary_kv_store = Arc::new( SqliteStore::with_config( storage_dir_path.into(), @@ -851,7 +860,7 @@ impl NodeBuilder { BuildError::KVStoreSetupFailed })?; // write value to new store - vss_store.write(primary_namespace, secondary_namespace, key, &value).map_err( + vss_store.write(primary_namespace, secondary_namespace, key, value).map_err( |e| { log_error!(logger, "Failed to migrate value: {}", e); BuildError::KVStoreSetupFailed @@ -934,7 +943,7 @@ impl NodeBuilder { if self.monitors_to_restore.is_some() { let monitors = self.monitors_to_restore.clone().unwrap(); for monitor in monitors { - let result = kv_store.write("monitors", "", &monitor.key, &monitor.value); + let result = &*kv_store.write("monitors", "", &monitor.key, &monitor.value); if result.is_err() { log_error!(logger, "Failed to restore monitor: {}", result.unwrap_err()); } diff --git a/src/event.rs b/src/event.rs index c309d03a7..2e199de99 100644 --- a/src/event.rs +++ b/src/event.rs @@ -81,7 +81,9 @@ use crate::payment::store::{ PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, }; use crate::runtime::Runtime; -use crate::types::{CustomTlvRecord, DynStore, OnionMessenger, PaymentStore, Sweeper, Wallet}; +use crate::types::{ + CustomTlvRecord, DynStore, OnionMessenger, 
PaymentStore, Sweeper, TlvEntry, Wallet, +}; use crate::{ hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, UserChannelId, diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 89f3db631..24e033a99 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -16,13 +16,11 @@ use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use lightning::io; -/*use lightning::util::persist::{ - KVStore, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, -}; -use lightning::util::string::PrintableString;*/ -use lightning::util::persist::{KVStore, KVStoreSync}; +use lightning::util::persist::{ + KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, +}; use lightning_types::string::PrintableString; use rusqlite::{named_params, Connection}; @@ -60,8 +58,6 @@ impl Default for SqliteStoreConfig { /// /// [SQLite]: https://sqlite.org pub struct SqliteStore { - config: SqliteStoreConfig, - inner: Arc, // Version counter to ensure that writes are applied in the correct order. It is assumed that read and list @@ -117,7 +113,7 @@ impl SqliteStore { if config.transient_graph { // Drop existing network graph if it has been persisted before. 
- ret.remove( + ret.inner.remove_internal( NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, @@ -125,7 +121,7 @@ impl SqliteStore { )?; } - ret.config = config; + ret.inner.config = config; Ok(ret) } } @@ -270,6 +266,7 @@ struct SqliteStoreInner { data_dir: PathBuf, kv_table_name: String, write_version_locks: Mutex>>>, + config: SqliteStoreConfig, } impl SqliteStoreInner { @@ -344,7 +341,13 @@ impl SqliteStoreInner { let connection = Arc::new(Mutex::new(connection)); let write_version_locks = Mutex::new(HashMap::new()); - Ok(Self { connection, data_dir, kv_table_name, write_version_locks }) + Ok(Self { + connection, + data_dir, + kv_table_name, + write_version_locks, + config: SqliteStoreConfig::default(), + }) } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index c83a840ad..53fc61e13 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -5,15 +5,6 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-/*use crate::io::utils::check_namespace_key_validity; -use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; -use lightning::io::{self, Error, ErrorKind}; -use lightning::util::persist::{ - KVStore, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, -}; -use prost::Message; -use rand::RngCore;*/ use std::boxed::Box; use std::collections::HashMap; use std::future::Future; @@ -28,7 +19,10 @@ use bdk_chain::Merge; use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; use lightning::impl_writeable_tlv_based_enum; use lightning::io::{self, Error, ErrorKind}; -use lightning::util::persist::{KVStore, KVStoreSync}; +use lightning::util::persist::{ + KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, +}; use lightning::util::ser::{Readable, Writeable}; use prost::Message; use rand::RngCore; @@ -90,7 +84,6 @@ pub struct VssStore { // would deadlock when trying to acquire sync `Mutex` locks that are held by the thread // currently being blocked waiting on the VSS operation to finish. 
internal_runtime: Option, - // Alby: secondary kv store for saving the network graph as it's large and shouldn't be saved to VSS // NOTE: for Alby Cloud we use a transient network graph (saved in memory and rebuilt on startup) secondary_kv_store: Arc, @@ -191,6 +184,14 @@ impl KVStoreSync for VssStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { + // Alby: read network graph from secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return self.secondary_kv_store.read(primary_namespace, secondary_namespace, key); + } + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); let msg = format!("Failed to access internal runtime"); @@ -211,6 +212,13 @@ impl KVStoreSync for VssStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { + // Alby: write network graph to secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return self.secondary_kv_store.write(primary_namespace, secondary_namespace, key, buf); + } let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); let msg = format!("Failed to access internal runtime"); @@ -242,6 +250,18 @@ impl KVStoreSync for VssStore { fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { + // Alby: remove network graph from secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == 
NETWORK_GRAPH_PERSISTENCE_KEY + { + return self.secondary_kv_store.remove( + primary_namespace, + secondary_namespace, + key, + lazy, + ); + } let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); let msg = format!("Failed to access internal runtime"); @@ -271,6 +291,16 @@ impl KVStoreSync for VssStore { } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + // FIXME: list keys + /* + // Alby: also list keys from secondary storage + let secondary_keys = + self.secondary_kv_store.list(primary_namespace, secondary_namespace)?; + + let all_keys: Vec = + keys.iter().cloned().chain(secondary_keys.iter().cloned()).collect(); + Ok(all_keys) + */ let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); let msg = format!("Failed to access internal runtime"); @@ -292,6 +322,13 @@ impl KVStore for VssStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Pin, io::Error>> + Send>> { + // Alby: read network graph from secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return self.secondary_kv_store.read(primary_namespace, secondary_namespace, key); + } let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); @@ -305,6 +342,13 @@ impl KVStore for VssStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> Pin> + Send>> { + // Alby: write network graph to secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return 
self.secondary_kv_store.write(primary_namespace, secondary_namespace, key, buf); + } let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); let primary_namespace = primary_namespace.to_string(); @@ -329,6 +373,18 @@ impl KVStore for VssStore { fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Pin> + Send>> { + // Alby: remove network graph from secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return self.secondary_kv_store.remove( + primary_namespace, + secondary_namespace, + key, + lazy, + ); + } let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); let primary_namespace = primary_namespace.to_string(); @@ -353,6 +409,16 @@ impl KVStore for VssStore { fn list( &self, primary_namespace: &str, secondary_namespace: &str, ) -> Pin, io::Error>> + Send>> { + // FIXME: list keys + /* + // Alby: also list keys from secondary storage + let secondary_keys = + self.secondary_kv_store.list(primary_namespace, secondary_namespace)?; + + let all_keys: Vec = + keys.iter().cloned().chain(secondary_keys.iter().cloned()).collect(); + Ok(all_keys) + */ let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); @@ -495,14 +561,6 @@ impl VssStoreInner { &self, client: &VssClient, primary_namespace: String, secondary_namespace: String, key: String, ) -> io::Result> { - // Alby: read network graph from secondary storage - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == 
NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY - { - return self.secondary_kv_store.read(primary_namespace, secondary_namespace, key); - } - check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); @@ -540,14 +598,6 @@ impl VssStoreInner { locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, ) -> io::Result<()> { - // Alby: write network graph to secondary storage - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY - { - return self.secondary_kv_store.write(primary_namespace, secondary_namespace, key, buf); - } - check_namespace_key_validity( &primary_namespace, &secondary_namespace, @@ -603,19 +653,6 @@ impl VssStoreInner { locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, ) -> io::Result<()> { - // Alby: remove network graph from secondary storage - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY - { - return self.secondary_kv_store.remove( - primary_namespace, - secondary_namespace, - key, - _lazy, - ); - } - check_namespace_key_validity( &primary_namespace, &secondary_namespace, @@ -667,13 +704,7 @@ impl VssStoreInner { Error::new(ErrorKind::Other, msg) })?; - // Alby: also list keys from secondary storage - let secondary_keys = - self.secondary_kv_store.list(primary_namespace, secondary_namespace)?; - - let all_keys: Vec = - keys.iter().cloned().chain(secondary_keys.iter().cloned()).collect(); - Ok(all_keys) + Ok(keys) } async fn execute_locked_write< diff --git a/src/lib.rs b/src/lib.rs index 
64d738774..77fe8c244 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -108,8 +108,9 @@ pub use lightning; pub use lightning_invoice; pub use lightning_liquidity; pub use lightning_types; -use std::collections::HashMap; + pub use vss_client;*/ +use std::collections::HashMap; use std::default::Default; use std::net::ToSocketAddrs; use std::sync::{Arc, Mutex, RwLock}; @@ -207,6 +208,8 @@ pub use { use crate::scoring::setup_background_pathfinding_scores_sync; +use crate::types::{KeyValue, MigrateStorage, ResetState, TlvEntry}; + #[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node"); @@ -262,10 +265,6 @@ impl Node { return Err(Error::AlreadyRunning); } - let mut background_tasks = tokio::task::JoinSet::new(); - let mut cancellable_background_tasks = tokio::task::JoinSet::new(); - let runtime_handle = runtime.handle(); - log_info!( self.logger, "Starting up LDK Node with node ID {} on network: {}", @@ -501,7 +500,7 @@ impl Node { // increase backoff randomly e.g. for the first 6 iterations: // 1, [2-3], [3-5], [4-7], [5-9], [6-11], [7-13] let mut new_peer_retry_backoff = peer_retry_backoff + 1; - new_peer_retry_backoff += rand::thread_rng().gen_range(0..new_peer_retry_backoff); + new_peer_retry_backoff += rand::rng().gen_range(0..new_peer_retry_backoff); if new_peer_retry_backoff > 360 { new_peer_retry_backoff = 360 // 360 * 10 seconds = approx 1 hour maximum backoff } @@ -696,8 +695,6 @@ impl Node { panic!("Failed to process events"); }); }); - debug_assert!(self.background_processor_task.lock().unwrap().is_none()); - *self.background_processor_task.lock().unwrap() = Some(handle); if let Some(liquidity_source) = self.liquidity_source.as_ref() { let mut stop_liquidity_handler = self.stop_sender.subscribe(); @@ -749,20 +746,6 @@ impl Node { debug_assert!(false); }); - // Cancel cancellable background tasks - self.runtime.abort_cancellable_background_tasks(); - - // Cancel cancellable background tasks - if let Some(mut tasks) = 
self.cancellable_background_tasks.lock().unwrap().take() { - let runtime_2 = Arc::clone(&runtime); - tasks.abort_all(); - tokio::task::block_in_place(move || { - runtime_2.block_on(async { while let Some(_) = tasks.join_next().await {} }) - }); - } else { - debug_assert!(false, "Expected some cancellable background tasks"); - }; - // Disconnect all peers. self.peer_manager.disconnect_all_peers(); log_debug!(self.logger, "Disconnected all network peers."); @@ -1740,13 +1723,13 @@ impl Node { let mut total_lightning_balance_sats = 0; let mut lightning_balances = Vec::new(); - for (channel_id) in self.chain_monitor.list_monitors() { + for channel_id in self.chain_monitor.list_monitors() { match self.chain_monitor.get_monitor(channel_id) { Ok(monitor) => { + let funding_txo = monitor.get_funding_txo(); funding_txo_by_channel_id.insert(channel_id, funding_txo); let counterparty_node_id = monitor.get_counterparty_node_id(); - let funding_txo = monitor.get_funding_txo(); for ldk_balance in monitor.get_claimable_balances() { total_lightning_balance_sats += ldk_balance.claimable_amount_satoshis(); lightning_balances.push(LightningBalance::from_ldk_balance( diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 568be66d0..739eb1cbb 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -19,9 +19,7 @@ use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; -//use crate::payment::SendingParameters; -//use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore, TlvEntry}; -use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore}; +use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore, TlvEntry}; // The default `final_cltv_expiry_delta` we apply when not set. 
const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; @@ -54,8 +52,9 @@ impl SpontaneousPayment { /// If `sending_parameters` are provided they will override the default as well as the /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. pub fn send_with_tlvs_and_preimage( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, - custom_tlvs: Vec, preimage: Option, + &self, amount_msat: u64, node_id: PublicKey, + route_parameters: Option, custom_tlvs: Vec, + preimage: Option, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -81,8 +80,7 @@ impl SpontaneousPayment { amount_msat, ); - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); + let override_params = route_parameters.as_ref().or(self.config.route_parameters.as_ref()); if let Some(override_params) = override_params { override_params .max_total_routing_fee_msat diff --git a/src/payment/store.rs b/src/payment/store.rs index 01d5f3922..dae50a50d 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -5,7 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-//use crate::types::TlvEntry; +use crate::types::TlvEntry; use std::time::{Duration, SystemTime, UNIX_EPOCH}; From 325457c868601e838708f01d0b2b8abdc0da1b37 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Fri, 21 Nov 2025 15:19:44 +0700 Subject: [PATCH 180/184] fix: more compile errors --- bindings/ldk_node.udl | 33 +++++------ src/builder.rs | 17 ++++-- src/event.rs | 4 -- src/io/sqlite_store/mod.rs | 3 +- src/io/vss_store.rs | 40 ++++++++++--- src/lib.rs | 29 ---------- src/payment/spontaneous.rs | 114 +------------------------------------ 7 files changed, 63 insertions(+), 177 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 547d045fc..1f82a171f 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -243,16 +243,13 @@ interface Bolt12Payment { }; interface SpontaneousPayment { - // Alby: custom TLV & preimage (TODO: update to use send_with_preimage_and_custom_tlvs) + PaymentId send(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_with_tlvs_and_preimage(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters, sequence custom_tlvs, PaymentPreimage? preimage); - //PaymentId send(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters); - //[Throws=NodeError] - //PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters, sequence custom_tlvs); - //[Throws=NodeError] - //PaymentId send_with_preimage(u64 amount_msat, PublicKey node_id, PaymentPreimage preimage, RouteParametersConfig? route_parameters); - //[Throws=NodeError] - //PaymentId send_with_preimage_and_custom_tlvs(u64 amount_msat, PublicKey node_id, sequence custom_tlvs, PaymentPreimage preimage, RouteParametersConfig? route_parameters); + PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, RouteParametersConfig? 
route_parameters, sequence custom_tlvs); + [Throws=NodeError] + PaymentId send_with_preimage(u64 amount_msat, PublicKey node_id, PaymentPreimage preimage, RouteParametersConfig? route_parameters); + [Throws=NodeError] + PaymentId send_with_preimage_and_custom_tlvs(u64 amount_msat, PublicKey node_id, sequence custom_tlvs, PaymentPreimage preimage, RouteParametersConfig? route_parameters); [Throws=NodeError] void send_probes(u64 amount_msat, PublicKey node_id); }; @@ -626,7 +623,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u16 funding_tx_index, + u32 funding_tx_index, u64 amount_satoshis, u64 transaction_fee_satoshis, u64 outbound_payment_htlc_rounded_msat, @@ -638,7 +635,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u16 funding_tx_index, + u32 funding_tx_index, u64 amount_satoshis, u32 confirmation_height, BalanceSource source @@ -647,7 +644,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u16 funding_tx_index, + u32 funding_tx_index, u64 amount_satoshis, u32 timeout_height, PaymentHash payment_hash, @@ -657,7 +654,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u16 funding_tx_index, + u32 funding_tx_index, u64 amount_satoshis, u32 claimable_height, PaymentHash payment_hash, @@ -667,7 +664,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u16 funding_tx_index, + u32 funding_tx_index, u64 amount_satoshis, u32 expiry_height, PaymentHash payment_hash @@ -676,7 +673,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u16 funding_tx_index, + u32 funding_tx_index, u64 amount_satoshis ); }; @@ -695,7 +692,7 @@ interface PendingSweepBalance { u64 amount_satoshis, PublicKey? counterparty_node_id, Txid? 
funding_tx_id, - u16? funding_tx_index + u32? funding_tx_index ); BroadcastAwaitingConfirmation ( ChannelId? channel_id, @@ -704,7 +701,7 @@ interface PendingSweepBalance { u64 amount_satoshis, PublicKey? counterparty_node_id, Txid? funding_tx_id, - u16? funding_tx_index + u32? funding_tx_index ); AwaitingThresholdConfirmations ( ChannelId? channel_id, @@ -714,7 +711,7 @@ interface PendingSweepBalance { u64 amount_satoshis, PublicKey? counterparty_node_id, Txid? funding_tx_id, - u16? funding_tx_index + u32? funding_tx_index ); }; diff --git a/src/builder.rs b/src/builder.rs index 4997d1ed8..ba81967ba 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -943,7 +943,13 @@ impl NodeBuilder { if self.monitors_to_restore.is_some() { let monitors = self.monitors_to_restore.clone().unwrap(); for monitor in monitors { - let result = &*kv_store.write("monitors", "", &monitor.key, &monitor.value); + let result = KVStoreSync::write( + &*kv_store, + "monitors", + "", + &monitor.key, + monitor.value.clone(), + ); if result.is_err() { log_error!(logger, "Failed to restore monitor: {}", result.unwrap_err()); } @@ -2222,7 +2228,8 @@ fn reset_persistent_state(logger: Arc, kv_store: Arc, what: Re }; if node_metrics { - let result = kv_store.remove( + let result = KVStoreSync::remove( + &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, @@ -2234,7 +2241,8 @@ fn reset_persistent_state(logger: Arc, kv_store: Arc, what: Re } if scorer { - let result = kv_store.remove( + let result = KVStoreSync::remove( + &*kv_store, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, @@ -2246,7 +2254,8 @@ fn reset_persistent_state(logger: Arc, kv_store: Arc, what: Re } if network_graph { - let result = kv_store.remove( + let result = KVStoreSync::remove( + &*kv_store, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, diff --git 
a/src/event.rs b/src/event.rs index 2e199de99..6a96473a1 100644 --- a/src/event.rs +++ b/src/event.rs @@ -11,10 +11,6 @@ use std::collections::VecDeque; use std::ops::Deref; use std::sync::{Arc, Mutex}; -/*use crate::{ - hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, - TlvEntry, UserChannelId, -};*/ use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::secp256k1::PublicKey; use bitcoin::{Amount, OutPoint}; diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 24e033a99..71ed8a58e 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -113,7 +113,8 @@ impl SqliteStore { if config.transient_graph { // Drop existing network graph if it has been persisted before. - ret.inner.remove_internal( + KVStoreSync::remove( + &ret, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 53fc61e13..61b1f743f 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -86,14 +86,14 @@ pub struct VssStore { internal_runtime: Option, // Alby: secondary kv store for saving the network graph as it's large and shouldn't be saved to VSS // NOTE: for Alby Cloud we use a transient network graph (saved in memory and rebuilt on startup) - secondary_kv_store: Arc, + secondary_kv_store: Arc, } impl VssStore { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, - secondary_kv_store: Arc, + secondary_kv_store: Arc, ) -> io::Result { let next_version = AtomicU64::new(1); let internal_runtime = tokio::runtime::Builder::new_multi_thread() @@ -189,7 +189,12 @@ impl KVStoreSync for VssStore { && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE && key == NETWORK_GRAPH_PERSISTENCE_KEY { - return self.secondary_kv_store.read(primary_namespace, secondary_namespace, key); + return 
lightning::util::persist::KVStoreSync::read( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + ); } let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { @@ -217,7 +222,13 @@ impl KVStoreSync for VssStore { && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE && key == NETWORK_GRAPH_PERSISTENCE_KEY { - return self.secondary_kv_store.write(primary_namespace, secondary_namespace, key, buf); + return lightning::util::persist::KVStoreSync::write( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + buf, + ); } let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); @@ -255,7 +266,8 @@ impl KVStoreSync for VssStore { && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE && key == NETWORK_GRAPH_PERSISTENCE_KEY { - return self.secondary_kv_store.remove( + return lightning::util::persist::KVStoreSync::remove( + &*self.secondary_kv_store, primary_namespace, secondary_namespace, key, @@ -327,7 +339,12 @@ impl KVStore for VssStore { && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE && key == NETWORK_GRAPH_PERSISTENCE_KEY { - return self.secondary_kv_store.read(primary_namespace, secondary_namespace, key); + return lightning::util::persist::KVStore::read( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + ); } let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); @@ -347,7 +364,13 @@ impl KVStore for VssStore { && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE && key == NETWORK_GRAPH_PERSISTENCE_KEY { - return self.secondary_kv_store.write(primary_namespace, secondary_namespace, key, buf); + return lightning::util::persist::KVStore::write( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + buf, + ); } let locking_key = 
self.build_locking_key(primary_namespace, secondary_namespace, key); let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); @@ -378,7 +401,8 @@ impl KVStore for VssStore { && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE && key == NETWORK_GRAPH_PERSISTENCE_KEY { - return self.secondary_kv_store.remove( + return lightning::util::persist::KVStore::remove( + &*self.secondary_kv_store, primary_namespace, secondary_namespace, key, diff --git a/src/lib.rs b/src/lib.rs index 77fe8c244..d5a307c02 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -102,14 +102,6 @@ mod tx_broadcaster; mod types; mod wallet; -/*pub use bip39; -pub use bitcoin; -pub use lightning; -pub use lightning_invoice; -pub use lightning_liquidity; -pub use lightning_types; - -pub use vss_client;*/ use std::collections::HashMap; use std::default::Default; use std::net::ToSocketAddrs; @@ -169,27 +161,6 @@ use payment::{ UnifiedQrPayment, }; use peer_store::{PeerInfo, PeerStore}; -/*use types::{ - Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, - KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, -}; -pub use types::{ChannelDetails, CustomTlvRecord, KeyValue, PeerDetails, TlvEntry, UserChannelId}; -#[cfg(feature = "uniffi")] -use types::{MigrateStorage, ResetState}; - -use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; - -use lightning::chain::BestBlock; -use lightning::events::bump_transaction::Wallet as LdkWallet; -use lightning::impl_writeable_tlv_based; -use lightning::ln::channel_state::ChannelShutdownState; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::msgs::SocketAddress; -use lightning::routing::gossip::NodeAlias; - -use lightning_background_processor::process_events_async; - -use bitcoin::secp256k1::PublicKey;*/ use rand::Rng; use runtime::Runtime; diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 
739eb1cbb..5f1825dea 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -19,7 +19,7 @@ use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; -use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore, TlvEntry}; +use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore}; // The default `final_cltv_expiry_delta` we apply when not set. const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; @@ -47,118 +47,6 @@ impl SpontaneousPayment { Self { channel_manager, keys_manager, payment_store, config, is_running, logger } } - // Alby: send a keysend payment with TLVs and preimage - /// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. 
- pub fn send_with_tlvs_and_preimage( - &self, amount_msat: u64, node_id: PublicKey, - route_parameters: Option, custom_tlvs: Vec, - preimage: Option, - ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { - return Err(Error::NotRunning); - } - - let payment_preimage = preimage - .unwrap_or_else(|| PaymentPreimage(self.keys_manager.get_secure_random_bytes())); - let payment_hash = PaymentHash::from(payment_preimage); - let payment_id = PaymentId(payment_hash.0); - - if let Some(payment) = self.payment_store.get(&payment_id) { - if payment.status == PaymentStatus::Pending - || payment.status == PaymentStatus::Succeeded - { - log_error!(self.logger, "Payment error: must not send duplicate payments."); - return Err(Error::DuplicatePayment); - } - } - - let mut route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::from_node_id(node_id, LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA), - amount_msat, - ); - - let override_params = route_parameters.as_ref().or(self.config.route_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; - - let recipient_fields = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs( - custom_tlvs.iter().map(|tlv| (tlv.r#type, tlv.value.clone())).collect(), - ) - .map_err(|_| { - log_error!(self.logger, "Payment error: invalid custom TLVs."); - Error::InvalidCustomTlv - })?; - - match self.channel_manager.send_spontaneous_payment( - Some(payment_preimage), - recipient_fields, - PaymentId(payment_hash.0), - 
route_params, - Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT), - ) { - Ok(_hash) => { - log_info!(self.logger, "Initiated sending {}msat to {}.", amount_msat, node_id); - - let kind = PaymentKind::Spontaneous { - hash: payment_hash, - preimage: Some(payment_preimage), - custom_tlvs, - }; - let payment = PaymentDetails::new( - payment_id, - kind, - Some(amount_msat), - None, - PaymentDirection::Outbound, - PaymentStatus::Pending, - ); - - self.payment_store.insert(payment)?; - - Ok(payment_id) - }, - Err(e) => { - log_error!(self.logger, "Failed to send payment: {:?}", e); - - match e { - RetryableSendFailure::DuplicatePayment => Err(Error::DuplicatePayment), - _ => { - let kind = PaymentKind::Spontaneous { - hash: payment_hash, - preimage: Some(payment_preimage), - custom_tlvs, - }; - let payment = PaymentDetails::new( - payment_id, - kind, - Some(amount_msat), - None, - PaymentDirection::Outbound, - PaymentStatus::Failed, - ); - - self.payment_store.insert(payment)?; - Err(Error::PaymentSendingFailed) - }, - } - }, - } - } - /// Send a spontaneous aka. "keysend", payment. 
/// /// If `route_parameters` are provided they will override the default as well as the From 9c20b764473883fb307aed16f4905d3cbb425d44 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Fri, 21 Nov 2025 16:43:57 +0700 Subject: [PATCH 181/184] fix: compile errors --- bindings/ldk_node.udl | 37 +++++++++--------- src/balance.rs | 29 ++++++++------- src/builder.rs | 9 +++-- src/io/sqlite_store/migrations.rs | 3 +- src/io/sqlite_store/mod.rs | 62 +++++++++++++++---------------- src/lib.rs | 40 ++++++-------------- tests/common/mod.rs | 1 + 7 files changed, 82 insertions(+), 99 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 1f82a171f..d3c788463 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -243,6 +243,7 @@ interface Bolt12Payment { }; interface SpontaneousPayment { + [Throws=NodeError] PaymentId send(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters); [Throws=NodeError] PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, RouteParametersConfig? 
route_parameters, sequence custom_tlvs); @@ -560,16 +561,16 @@ enum LSPS1PaymentState { "Refunded", }; -// [NonExhaustive] -// enum Network { -// "Bitcoin", -// "Testnet", -// "Signet", -// "Regtest", -// }; +[NonExhaustive] +enum Network { + "Bitcoin", + "Testnet", + "Signet", + "Regtest", +}; -[Custom] -typedef string Network; +// [Custom] +// typedef string Network; dictionary OutPoint { Txid txid; @@ -623,7 +624,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u32 funding_tx_index, + u16 funding_tx_index, u64 amount_satoshis, u64 transaction_fee_satoshis, u64 outbound_payment_htlc_rounded_msat, @@ -635,7 +636,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u32 funding_tx_index, + u16 funding_tx_index, u64 amount_satoshis, u32 confirmation_height, BalanceSource source @@ -644,7 +645,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u32 funding_tx_index, + u16 funding_tx_index, u64 amount_satoshis, u32 timeout_height, PaymentHash payment_hash, @@ -654,7 +655,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u32 funding_tx_index, + u16 funding_tx_index, u64 amount_satoshis, u32 claimable_height, PaymentHash payment_hash, @@ -664,7 +665,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u32 funding_tx_index, + u16 funding_tx_index, u64 amount_satoshis, u32 expiry_height, PaymentHash payment_hash @@ -673,7 +674,7 @@ interface LightningBalance { ChannelId channel_id, PublicKey counterparty_node_id, Txid funding_tx_id, - u32 funding_tx_index, + u16 funding_tx_index, u64 amount_satoshis ); }; @@ -692,7 +693,7 @@ interface PendingSweepBalance { u64 amount_satoshis, PublicKey? counterparty_node_id, Txid? funding_tx_id, - u32? funding_tx_index + u16? 
funding_tx_index ); BroadcastAwaitingConfirmation ( ChannelId? channel_id, @@ -701,7 +702,7 @@ interface PendingSweepBalance { u64 amount_satoshis, PublicKey? counterparty_node_id, Txid? funding_tx_id, - u32? funding_tx_index + u16? funding_tx_index ); AwaitingThresholdConfirmations ( ChannelId? channel_id, @@ -711,7 +712,7 @@ interface PendingSweepBalance { u64 amount_satoshis, PublicKey? counterparty_node_id, Txid? funding_tx_id, - u32? funding_tx_index + u16? funding_tx_index ); }; diff --git a/src/balance.rs b/src/balance.rs index 6de65bb85..75fac1b90 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -6,8 +6,9 @@ // accordance with one or both of these licenses. use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, BlockHash, OutPoint, Txid}; +use bitcoin::{Amount, BlockHash, Txid}; use lightning::chain::channelmonitor::{Balance as LdkBalance, BalanceSource}; +use lightning::chain::transaction::OutPoint; use lightning::ln::types::ChannelId; use lightning::sign::SpendableOutputDescriptor; use lightning::util::sweep::{OutputSpendStatus, TrackedSpendableOutput}; @@ -80,7 +81,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u32, + funding_tx_index: u16, /// The amount available to claim, in satoshis, excluding the on-chain fees which will be /// required to do so. amount_satoshis: u64, @@ -138,7 +139,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u32, + funding_tx_index: u16, /// The amount available to claim, in satoshis, possibly excluding the on-chain fees which /// were spent in broadcasting the transaction. amount_satoshis: u64, @@ -165,7 +166,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. 
- funding_tx_index: u32, + funding_tx_index: u16, /// The amount available to claim, in satoshis, excluding the on-chain fees which will be /// required to do so. amount_satoshis: u64, @@ -188,7 +189,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u32, + funding_tx_index: u16, /// The amount potentially available to claim, in satoshis, excluding the on-chain fees /// which will be required to do so. amount_satoshis: u64, @@ -211,7 +212,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u32, + funding_tx_index: u16, /// The amount potentially available to claim, in satoshis, excluding the on-chain fees /// which will be required to do so. amount_satoshis: u64, @@ -234,7 +235,7 @@ pub enum LightningBalance { /// Alby: funding transaction ID. funding_tx_id: Txid, /// Alby: funding transaction output index. - funding_tx_index: u32, + funding_tx_index: u16, /// The amount, in satoshis, of the output which we can claim. amount_satoshis: u64, }, @@ -245,7 +246,7 @@ impl LightningBalance { channel_id: ChannelId, counterparty_node_id: PublicKey, funding_txo: OutPoint, balance: LdkBalance, ) -> Self { - let OutPoint { txid: funding_tx_id, vout: funding_tx_index } = funding_txo; + let OutPoint { txid: funding_tx_id, index: funding_tx_index } = funding_txo; match balance { LdkBalance::ClaimableOnChannelClose { balance_candidates, @@ -355,7 +356,7 @@ pub enum PendingSweepBalance { /// Alby: funding transaction ID. funding_tx_id: Option, /// Alby: funding transaction output index. - funding_tx_index: Option, + funding_tx_index: Option, }, /// A spending transaction has been generated and broadcast and is awaiting confirmation /// on-chain. @@ -373,7 +374,7 @@ pub enum PendingSweepBalance { /// Alby: funding transaction ID. 
funding_tx_id: Option, /// Alby: funding transaction output index. - funding_tx_index: Option, + funding_tx_index: Option, }, /// A spending transaction has been confirmed on-chain and is awaiting threshold confirmations. /// @@ -396,7 +397,7 @@ pub enum PendingSweepBalance { /// Alby: funding transaction ID. funding_tx_id: Option, /// Alby: funding transaction output index. - funding_tx_index: Option, + funding_tx_index: Option, }, } @@ -414,7 +415,7 @@ impl PendingSweepBalance { amount_satoshis, counterparty_node_id, funding_tx_id: funding_txo.map(|funding_txo| funding_txo.txid), - funding_tx_index: funding_txo.map(|funding_txo| funding_txo.vout), + funding_tx_index: funding_txo.map(|funding_txo| funding_txo.index), } }, OutputSpendStatus::PendingFirstConfirmation { @@ -432,7 +433,7 @@ impl PendingSweepBalance { amount_satoshis, counterparty_node_id, funding_tx_id: funding_txo.map(|funding_txo| funding_txo.txid), - funding_tx_index: funding_txo.map(|funding_txo| funding_txo.vout), + funding_tx_index: funding_txo.map(|funding_txo| funding_txo.index), } }, OutputSpendStatus::PendingThresholdConfirmations { @@ -452,7 +453,7 @@ impl PendingSweepBalance { amount_satoshis, counterparty_node_id, funding_tx_id: funding_txo.map(|funding_txo| funding_txo.txid), - funding_tx_index: funding_txo.map(|funding_txo| funding_txo.vout), + funding_tx_index: funding_txo.map(|funding_txo| funding_txo.index), } }, } diff --git a/src/builder.rs b/src/builder.rs index ba81967ba..dfbf3e2b6 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -629,11 +629,11 @@ impl NodeBuilder { transient_graph: self.config.transient_network_graph, }; let kv_store = Arc::new( - SqliteStore::with_config( + SqliteStore::new( storage_dir_path.into(), Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()), Some(io::sqlite_store::KV_TABLE_NAME.to_string()), - sql_store_config, + Some(sql_store_config), ) .map_err(|_| BuildError::KVStoreSetupFailed)?, ); @@ -811,6 +811,7 @@ impl NodeBuilder { 
storage_dir_path.into(), Some(backup_filename), Some(io::sqlite_store::KV_TABLE_NAME.to_string()), + None, ) .map_err(|_| BuildError::KVStoreSetupFailed)?, ) as Arc); @@ -822,11 +823,11 @@ impl NodeBuilder { transient_graph: self.config.transient_network_graph, }; let secondary_kv_store = Arc::new( - SqliteStore::with_config( + SqliteStore::new( storage_dir_path.into(), Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()), Some(io::sqlite_store::KV_TABLE_NAME.to_string()), - sql_store_config, + Some(sql_store_config), ) .map_err(|_| BuildError::KVStoreSetupFailed)?, ) as Arc; diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs index abfbdf6ef..f8f80c924 100644 --- a/src/io/sqlite_store/migrations.rs +++ b/src/io/sqlite_store/migrations.rs @@ -159,7 +159,8 @@ mod tests { } // Check we migrate the db just fine without losing our written data. - let store = SqliteStore::new(temp_path, Some(db_file_name), Some(kv_table_name)).unwrap(); + let store = + SqliteStore::new(temp_path, Some(db_file_name), Some(kv_table_name), None).unwrap(); let res = store.read(&test_namespace, "", &test_key).unwrap(); assert_eq!(res, test_data); diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 71ed8a58e..337211584 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -43,6 +43,7 @@ pub const DEFAULT_KV_TABLE_NAME: &str = "ldk_data"; const SCHEMA_USER_VERSION: u16 = 2; /// Alby: extended SqliteStore configuration. +#[derive(Clone)] pub struct SqliteStoreConfig { /// Do not persist network graph. pub(crate) transient_graph: bool, @@ -74,10 +75,27 @@ impl SqliteStore { /// Similarly, the given `kv_table_name` will be used or default to [`DEFAULT_KV_TABLE_NAME`]. 
pub fn new( data_dir: PathBuf, db_file_name: Option, kv_table_name: Option, + config: Option, ) -> io::Result { - let inner = Arc::new(SqliteStoreInner::new(data_dir, db_file_name, kv_table_name)?); + let config = config.unwrap_or_default(); + let inner = + Arc::new(SqliteStoreInner::new(data_dir, db_file_name, kv_table_name, config.clone())?); let next_write_version = AtomicU64::new(1); - Ok(Self { inner, next_write_version }) + let store = Self { inner, next_write_version }; + + // Alby: enable not saving network graph (Alby Cloud) + if config.transient_graph { + // Drop existing network graph if it has been persisted before. + KVStoreSync::remove( + &store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + false, + )?; + } + + Ok(store) } fn build_locking_key( @@ -103,28 +121,6 @@ impl SqliteStore { pub fn get_data_dir(&self) -> PathBuf { self.inner.data_dir.clone() } - - /// Alby: constructs a new [`SqliteStore`] with an extended configuration. - pub fn with_config( - data_dir: PathBuf, db_file_name: Option, kv_table_name: Option, - config: SqliteStoreConfig, - ) -> io::Result { - let mut ret = SqliteStore::new(data_dir, db_file_name, kv_table_name)?; - - if config.transient_graph { - // Drop existing network graph if it has been persisted before. 
- KVStoreSync::remove( - &ret, - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, - false, - )?; - } - - ret.inner.config = config; - Ok(ret) - } } impl KVStore for SqliteStore { @@ -273,6 +269,7 @@ struct SqliteStoreInner { impl SqliteStoreInner { fn new( data_dir: PathBuf, db_file_name: Option, kv_table_name: Option, + config: SqliteStoreConfig, ) -> io::Result { let db_file_name = db_file_name.unwrap_or(DEFAULT_SQLITE_DB_FILE_NAME.to_string()); let kv_table_name = kv_table_name.unwrap_or(DEFAULT_KV_TABLE_NAME.to_string()); @@ -342,13 +339,7 @@ impl SqliteStoreInner { let connection = Arc::new(Mutex::new(connection)); let write_version_locks = Mutex::new(HashMap::new()); - Ok(Self { - connection, - data_dir, - kv_table_name, - write_version_locks, - config: SqliteStoreConfig::default(), - }) + Ok(Self { connection, data_dir, kv_table_name, write_version_locks, config }) } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { @@ -602,6 +593,7 @@ mod tests { temp_path, Some("test_db".to_string()), Some("test_table".to_string()), + None, ) .unwrap(); do_read_write_remove_list_persist(&store); @@ -615,12 +607,14 @@ mod tests { temp_path.clone(), Some("test_db_0".to_string()), Some("test_table".to_string()), + None, ) .unwrap(); let store_1 = SqliteStore::new( temp_path, Some("test_db_1".to_string()), Some("test_table".to_string()), + None, ) .unwrap(); do_test_store(&store_0, &store_1) @@ -634,8 +628,10 @@ pub mod bench { /// Bench! 
pub fn bench_sends(bench: &mut Criterion) { - let store_a = super::SqliteStore::new("bench_sqlite_store_a".into(), None, None).unwrap(); - let store_b = super::SqliteStore::new("bench_sqlite_store_b".into(), None, None).unwrap(); + let store_a = + super::SqliteStore::new("bench_sqlite_store_a".into(), None, None, None).unwrap(); + let store_b = + super::SqliteStore::new("bench_sqlite_store_b".into(), None, None, None).unwrap(); lightning::ln::channelmanager::bench::bench_two_sends( bench, "bench_sqlite_persisted_sends", diff --git a/src/lib.rs b/src/lib.rs index d5a307c02..872d0853f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -471,7 +471,7 @@ impl Node { // increase backoff randomly e.g. for the first 6 iterations: // 1, [2-3], [3-5], [4-7], [5-9], [6-11], [7-13] let mut new_peer_retry_backoff = peer_retry_backoff + 1; - new_peer_retry_backoff += rand::rng().gen_range(0..new_peer_retry_backoff); + new_peer_retry_backoff += rand::rng().random_range(0..new_peer_retry_backoff); if new_peer_retry_backoff > 360 { new_peer_retry_backoff = 360 // 360 * 10 seconds = approx 1 hour maximum backoff } @@ -681,6 +681,7 @@ impl Node { ); return; } + _ = liquidity_handler.handle_next_event() => {} } } }); @@ -717,6 +718,9 @@ impl Node { debug_assert!(false); }); + // Cancel cancellable background tasks + self.runtime.abort_cancellable_background_tasks(); + // Disconnect all peers. 
self.peer_manager.disconnect_all_peers(); log_debug!(self.logger, "Disconnected all network peers."); @@ -1472,30 +1476,15 @@ impl Node { /// Alby: update fee estimates separately rather than doing a full sync pub fn update_fee_estimates(&self) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } let chain_source = Arc::clone(&self.chain_source); - tokio::task::block_in_place(move || { - tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( - async move { - match chain_source.as_ref() { - ChainSource::Esplora { .. } => { - chain_source.update_fee_rate_estimates().await?; - }, - ChainSource::Electrum { .. } => { - chain_source.update_fee_rate_estimates().await?; - }, - ChainSource::BitcoindRpc { .. } => { - chain_source.update_fee_rate_estimates().await?; - }, - } - Ok(()) - }, - ) - }); + self.runtime.block_on(async move { + chain_source.update_fee_rate_estimates().await?; + Ok(()) + }) } /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate @@ -1649,13 +1638,6 @@ impl Node { self.payment_store.remove(&payment_id) } - /// Alby: Used to recover funds after restoring static channel backup - pub fn force_close_all_channels_without_broadcasting_txn(&self) { - self.channel_manager.force_close_all_channels_without_broadcasting_txn( - "lost or corrupted channel state".to_string(), - ); - } - /// Alby: Return encoded channel monitors for a recovery of last resort pub fn get_encoded_channel_monitors(&self) -> Result, Error> { let channel_monitor_store: &dyn KVStoreSync = &*self.kv_store; @@ -1727,7 +1709,7 @@ impl Node { // See [`periodically_archive_fully_resolved_monitors`] for details. 
let funding_txo = out.channel_id.and_then(|c| funding_txo_by_channel_id.get(&c).cloned()); - let chmon = funding_txo.and_then(|txo| self.chain_monitor.get_monitor(txo).ok()); + let chmon = out.channel_id.and_then(|c| self.chain_monitor.get_monitor(c).ok()); let counterparty_node_id = chmon.and_then(|m| Some(m.get_counterparty_node_id())); PendingSweepBalance::from_tracked_spendable_output( out, diff --git a/tests/common/mod.rs b/tests/common/mod.rs index eb4fa816a..4dc0b110c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1438,6 +1438,7 @@ impl TestSyncStoreInner { sql_dir, Some("test_sync_db".to_string()), Some("test_sync_table".to_string()), + None, ) .unwrap(); let test_store = TestStore::new(false); From 8594e54817b1979e43087f2acba5c21782463e8f Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Mon, 24 Nov 2025 13:52:49 +0700 Subject: [PATCH 182/184] chore: remove commented out code --- src/builder.rs | 74 -------------------------------------------- src/chain/mod.rs | 36 --------------------- src/event.rs | 29 ----------------- src/lib.rs | 12 ++----- src/payment/store.rs | 6 ---- src/types.rs | 8 ----- 6 files changed, 2 insertions(+), 163 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index dfbf3e2b6..f780ac9bb 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1564,80 +1564,6 @@ fn build_with_store_internal( Arc::clone(&logger), )); - /*let chain_source = match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => { - log_info!(logger, "Using esplora server: {}", server_url); - let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); - Arc::new(ChainSource::new_esplora( - server_url.clone(), - sync_config, - Arc::clone(&wallet), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )) - }, - Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => { - let 
sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default()); - Arc::new(ChainSource::new_electrum( - server_url.clone(), - sync_config, - Arc::clone(&wallet), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )) - }, - Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }) => { - Arc::new(ChainSource::new_bitcoind_rpc( - rpc_host.clone(), - *rpc_port, - rpc_user.clone(), - rpc_password.clone(), - Arc::clone(&wallet), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )) - }, - None => { - // Default to Esplora client. - let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); - let sync_config = EsploraSyncConfig::default(); - Arc::new(ChainSource::new_esplora( - server_url.clone(), - sync_config, - Arc::clone(&wallet), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )) - }, - }; - - let runtime = Arc::new(RwLock::new(None)); - - // Initialize the ChainMonitor - let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( - Some(Arc::clone(&chain_source)), - Arc::clone(&tx_broadcaster), - Arc::clone(&logger), - Arc::clone(&fee_estimator), - Arc::clone(&kv_store), - ));*/ - // Initialize the KeysManager let cur_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).map_err(|e| { log_error!(logger, "Failed to get current time: {}", e); diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 3c0d2c562..6fc336e68 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -8,42 +8,6 @@ mod bitcoind; mod electrum; -/*use crate::chain::bitcoind_rpc::{ - BitcoindRpcClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, -}; -use crate::chain::electrum::ElectrumRuntimeClient; -use 
crate::config::{ - BackgroundSyncConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, - BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, - LDK_WALLET_SYNC_TIMEOUT_SECS, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, - TX_BROADCAST_TIMEOUT_SECS, WALLET_SYNC_INTERVAL_MINIMUM_SECS, -}; -use crate::fee_estimator::{ - apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, - ConfirmationTarget, OnchainFeeEstimator, -}; -use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; -use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; -use crate::{Error, NodeMetrics}; - -use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::{Confirm, Filter, Listen, WatchedOutput}; -use lightning::util::ser::Writeable; - -use lightning_transaction_sync::EsploraSyncClient; - -use lightning_block_sync::gossip::UtxoSource; -use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; -use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; -use lightning_block_sync::SpvClient; - -use bdk_esplora::EsploraAsyncExt; -use bdk_wallet::Update as BdkUpdate; - -use esplora_client::AsyncClient as EsploraAsyncClient; - -use bitcoin::{FeeRate, Network, Script, ScriptBuf, Txid};*/ mod esplora; use std::collections::HashMap; diff --git a/src/event.rs b/src/event.rs index 6a96473a1..d33c73d2e 100644 --- a/src/event.rs +++ b/src/event.rs @@ -40,35 +40,6 @@ use crate::io::{ EVENT_QUEUE_PERSISTENCE_KEY, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, }; -/*use crate::logger::{log_debug, log_error, log_info, LdkLogger}; - -use lightning::events::bump_transaction::BumpTransactionEvent; -use lightning::events::{ClosureReason, PaymentPurpose, ReplayEvent}; -use 
lightning::events::{Event as LdkEvent, PaymentFailureReason}; -use lightning::impl_writeable_tlv_based_enum; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::types::ChannelId; -use lightning::routing::gossip::NodeId; -use lightning::util::errors::APIError; -use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; - -use lightning_types::payment::{PaymentHash, PaymentPreimage}; - -use lightning_liquidity::lsps2::utils::compute_opening_fee; - -use bitcoin::blockdata::locktime::absolute::LockTime; -use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, OutPoint}; - -use rand::{thread_rng, Rng}; - -use core::future::Future; -use core::task::{Poll, Waker}; -use std::collections::VecDeque; -use std::ops::Deref; -use std::str::FromStr; -use std::sync::{Arc, Condvar, Mutex, RwLock}; -use std::time::Duration;*/ use crate::liquidity::LiquiditySource; use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; diff --git a/src/lib.rs b/src/lib.rs index 872d0853f..bf6579122 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -118,16 +118,8 @@ pub use builder::BuildError; pub use builder::NodeBuilder as Builder; use chain::ChainSource; use config::{ - //default_user_config, may_announce_channel, ChannelConfig, Config, - //BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, - default_user_config, - may_announce_channel, - AsyncPaymentsRole, - ChannelConfig, - Config, - NODE_ANN_BCAST_INTERVAL, - PEER_RECONNECTION_INTERVAL, - RGS_SYNC_INTERVAL, + default_user_config, may_announce_channel, AsyncPaymentsRole, ChannelConfig, Config, + NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; pub use error::Error as NodeError; diff --git a/src/payment/store.rs b/src/payment/store.rs index dae50a50d..8ab3d12a2 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -48,12 +48,6 @@ 
pub struct PaymentDetails { pub status: PaymentStatus, /// The timestamp, in seconds since start of the UNIX epoch, when this entry was last updated. pub latest_update_timestamp: u64, - - // Old Alby fields - duplicates of new LDK fields - /*/// Alby: Last update timestamp, as seconds since Unix epoch. TODO: remove and use latest_update_timestamp - pub last_update: u64, - /// Alby: Fee paid. TODO: remove and use fee_paid_msat - pub fee_msat: Option,*/ /// Alby: Payment creation timestamp, as seconds since Unix epoch. pub created_at: u64, } diff --git a/src/types.rs b/src/types.rs index ec3b99572..c0ae1f466 100644 --- a/src/types.rs +++ b/src/types.rs @@ -383,14 +383,6 @@ pub struct ChannelDetails { impl From for ChannelDetails { fn from(value: LdkChannelDetails) -> Self { - // let channel_type = value.channel_type.map(|t| { - // if t.requires_anchors_zero_fee_htlc_tx() { - // ChannelType::Anchors - // } else { - // ChannelType::StaticRemoteKey - // } - // }); - ChannelDetails { channel_id: value.channel_id, counterparty_node_id: value.counterparty.node_id, From bfece53850bfedae6081576ee612a7962cfb53e6 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Mon, 24 Nov 2025 14:37:13 +0700 Subject: [PATCH 183/184] chore: panic if unexpected namespaces are listed from vss store --- src/io/utils.rs | 3 ++- src/io/vss_store.rs | 36 ++++++++++++++++-------------------- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/src/io/utils.rs b/src/io/utils.rs index 1b4b02a82..26b586889 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -45,7 +45,7 @@ use crate::fee_estimator::OnchainFeeEstimator; use crate::io::{ NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, }; -use crate::logger::{log_error, LdkLogger, Logger}; +use crate::logger::{log_error, log_trace, LdkLogger, Logger}; use crate::peer_store::PeerStore; use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper, WordCount}; use crate::wallet::ser::{ChangeSetDeserWrapper, 
ChangeSetSerWrapper}; @@ -139,6 +139,7 @@ pub(crate) fn read_network_graph( where L::Target: LdkLogger, { + log_trace!(logger, "Reading network graph"); let mut reader = Cursor::new(KVStoreSync::read( &*kv_store, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 61b1f743f..74875b6cb 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -303,16 +303,14 @@ impl KVStoreSync for VssStore { } fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { - // FIXME: list keys - /* - // Alby: also list keys from secondary storage - let secondary_keys = - self.secondary_kv_store.list(primary_namespace, secondary_namespace)?; - - let all_keys: Vec = - keys.iter().cloned().chain(secondary_keys.iter().cloned()).collect(); - Ok(all_keys) - */ + // Alby: we use a secondary store for the network graph and currently don't support merging results + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE { + panic!("Alby: cannot list from NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE"); + } + if primary_namespace == "" { + panic!("Alby: cannot list from empty primary namespace"); + } + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { debug_assert!(false, "Failed to access internal runtime"); let msg = format!("Failed to access internal runtime"); @@ -433,16 +431,14 @@ impl KVStore for VssStore { fn list( &self, primary_namespace: &str, secondary_namespace: &str, ) -> Pin, io::Error>> + Send>> { - // FIXME: list keys - /* - // Alby: also list keys from secondary storage - let secondary_keys = - self.secondary_kv_store.list(primary_namespace, secondary_namespace)?; - - let all_keys: Vec = - keys.iter().cloned().chain(secondary_keys.iter().cloned()).collect(); - Ok(all_keys) - */ + // Alby: we use a secondary store for the network graph and currently don't support merging results + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE { + panic!("Alby: 
cannot list from NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE"); + } + if primary_namespace == "" { + panic!("Alby: cannot list from empty primary namespace"); + } + let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let inner = Arc::clone(&self.inner); From bc0968dbce56c2edf3ea8a426da6cad466b45726 Mon Sep 17 00:00:00 2001 From: Roland Bewick Date: Mon, 24 Nov 2025 14:40:36 +0700 Subject: [PATCH 184/184] fix: incorrect unused imports error --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index bf6579122..aa9efbd9c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -171,7 +171,8 @@ pub use { use crate::scoring::setup_background_pathfinding_scores_sync; -use crate::types::{KeyValue, MigrateStorage, ResetState, TlvEntry}; +use crate::types::KeyValue; +pub use crate::types::{MigrateStorage, ResetState, TlvEntry}; #[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node");