diff --git a/chainntnfs/bitcoindnotify/bitcoind_test.go b/chainntnfs/bitcoindnotify/bitcoind_test.go index e48423bcb11..4b670854f59 100644 --- a/chainntnfs/bitcoindnotify/bitcoind_test.go +++ b/chainntnfs/bitcoindnotify/bitcoind_test.go @@ -44,7 +44,7 @@ func initHintCache(t *testing.T) *chainntnfs.HeightHintCache { testCfg := chainntnfs.CacheConfig{ QueryDisable: false, } - hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db) + hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend) if err != nil { t.Fatalf("unable to create hint cache: %v", err) } diff --git a/chainntnfs/btcdnotify/btcd_test.go b/chainntnfs/btcdnotify/btcd_test.go index 7302171c1bf..798275f06ea 100644 --- a/chainntnfs/btcdnotify/btcd_test.go +++ b/chainntnfs/btcdnotify/btcd_test.go @@ -42,7 +42,7 @@ func initHintCache(t *testing.T) *chainntnfs.HeightHintCache { testCfg := chainntnfs.CacheConfig{ QueryDisable: false, } - hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db) + hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend) if err != nil { t.Fatalf("unable to create hint cache: %v", err) } diff --git a/chainntnfs/height_hint_cache.go b/chainntnfs/height_hint_cache.go index 0cbc28ad8ee..b2859ac3726 100644 --- a/chainntnfs/height_hint_cache.go +++ b/chainntnfs/height_hint_cache.go @@ -84,7 +84,7 @@ type ConfirmHintCache interface { // will be stored. type HeightHintCache struct { cfg CacheConfig - db *channeldb.DB + db kvdb.Backend } // Compile-time checks to ensure HeightHintCache satisfies the SpendHintCache @@ -93,7 +93,9 @@ var _ SpendHintCache = (*HeightHintCache)(nil) var _ ConfirmHintCache = (*HeightHintCache)(nil) // NewHeightHintCache returns a new height hint cache backed by a database. 
-func NewHeightHintCache(cfg CacheConfig, db *channeldb.DB) (*HeightHintCache, error) { +func NewHeightHintCache(cfg CacheConfig, db kvdb.Backend) (*HeightHintCache, + error) { + cache := &HeightHintCache{cfg, db} if err := cache.initBuckets(); err != nil { return nil, err @@ -105,7 +107,7 @@ func NewHeightHintCache(cfg CacheConfig, db *channeldb.DB) (*HeightHintCache, er // initBuckets ensures that the primary buckets used by the circuit are // initialized so that we can assume their existence after startup. func (c *HeightHintCache) initBuckets() error { - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + return kvdb.Batch(c.db, func(tx kvdb.RwTx) error { _, err := tx.CreateTopLevelBucket(spendHintBucket) if err != nil { return err @@ -127,7 +129,7 @@ func (c *HeightHintCache) CommitSpendHint(height uint32, Log.Tracef("Updating spend hint to height %d for %v", height, spendRequests) - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + return kvdb.Batch(c.db, func(tx kvdb.RwTx) error { spendHints := tx.ReadWriteBucket(spendHintBucket) if spendHints == nil { return ErrCorruptedHeightHintCache @@ -197,7 +199,7 @@ func (c *HeightHintCache) PurgeSpendHint(spendRequests ...SpendRequest) error { Log.Tracef("Removing spend hints for %v", spendRequests) - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + return kvdb.Batch(c.db, func(tx kvdb.RwTx) error { spendHints := tx.ReadWriteBucket(spendHintBucket) if spendHints == nil { return ErrCorruptedHeightHintCache @@ -228,7 +230,7 @@ func (c *HeightHintCache) CommitConfirmHint(height uint32, Log.Tracef("Updating confirm hints to height %d for %v", height, confRequests) - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + return kvdb.Batch(c.db, func(tx kvdb.RwTx) error { confirmHints := tx.ReadWriteBucket(confirmHintBucket) if confirmHints == nil { return ErrCorruptedHeightHintCache @@ -299,7 +301,7 @@ func (c *HeightHintCache) PurgeConfirmHint(confRequests ...ConfRequest) error { 
Log.Tracef("Removing confirm hints for %v", confRequests) - return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + return kvdb.Batch(c.db, func(tx kvdb.RwTx) error { confirmHints := tx.ReadWriteBucket(confirmHintBucket) if confirmHints == nil { return ErrCorruptedHeightHintCache diff --git a/chainntnfs/height_hint_cache_test.go b/chainntnfs/height_hint_cache_test.go index 7909f82f1c9..ad2f4842680 100644 --- a/chainntnfs/height_hint_cache_test.go +++ b/chainntnfs/height_hint_cache_test.go @@ -32,7 +32,7 @@ func initHintCacheWithConfig(t *testing.T, cfg CacheConfig) *HeightHintCache { if err != nil { t.Fatalf("unable to create db: %v", err) } - hintCache, err := NewHeightHintCache(cfg, db) + hintCache, err := NewHeightHintCache(cfg, db.Backend) if err != nil { t.Fatalf("unable to create hint cache: %v", err) } diff --git a/chainntnfs/test/test_interface.go b/chainntnfs/test/test_interface.go index bd6b70aca8b..d85cc802561 100644 --- a/chainntnfs/test/test_interface.go +++ b/chainntnfs/test/test_interface.go @@ -1926,7 +1926,9 @@ func TestInterfaces(t *testing.T, targetBackEnd string) { testCfg := chainntnfs.CacheConfig{ QueryDisable: false, } - hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db) + hintCache, err := chainntnfs.NewHeightHintCache( + testCfg, db.Backend, + ) if err != nil { t.Fatalf("unable to create height hint cache: %v", err) } diff --git a/chainreg/chainregistry.go b/chainreg/chainregistry.go index 459dae15d9d..ad0b2f54b56 100644 --- a/chainreg/chainregistry.go +++ b/chainreg/chainregistry.go @@ -28,6 +28,7 @@ import ( "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/btcwallet" @@ -68,11 +69,13 @@ type Config struct { // LtcdMode defines settings for connecting to an ltcd node. 
LtcdMode *lncfg.Btcd - // LocalChanDB is a pointer to the local backing channel database. - LocalChanDB *channeldb.DB + // HeightHintDB is a pointer to the database that stores the height + // hints. + HeightHintDB kvdb.Backend - // RemoteChanDB is a pointer to the remote backing channel database. - RemoteChanDB *channeldb.DB + // ChanStateDB is a pointer to the database that stores the channel + // state. + ChanStateDB *channeldb.DB // BlockCacheSize is the size (in bytes) of blocks kept in memory. BlockCacheSize uint64 @@ -304,7 +307,7 @@ func NewChainControl(cfg *Config, blockCache *blockcache.BlockCache) ( // Initialize the height hint cache within the chain directory. hintCache, err := chainntnfs.NewHeightHintCache( - heightHintCacheConfig, cfg.LocalChanDB, + heightHintCacheConfig, cfg.HeightHintDB, ) if err != nil { return nil, nil, fmt.Errorf("unable to initialize height hint "+ @@ -684,7 +687,7 @@ func NewChainControl(cfg *Config, blockCache *blockcache.BlockCache) ( // Create, and start the lnwallet, which handles the core payment // channel logic, and exposes control via proxy state machines. walletCfg := lnwallet.Config{ - Database: cfg.RemoteChanDB, + Database: cfg.ChanStateDB, Notifier: cc.ChainNotifier, WalletController: wc, Signer: cc.Signer, diff --git a/channeldb/db.go b/channeldb/db.go index 75095b01a40..e1a29dc1c2e 100644 --- a/channeldb/db.go +++ b/channeldb/db.go @@ -10,7 +10,6 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcwallet/walletdb" "github.com/go-errors/errors" mig "github.com/lightningnetwork/lnd/channeldb/migration" "github.com/lightningnetwork/lnd/channeldb/migration12" @@ -25,8 +24,7 @@ import ( ) const ( - dbName = "channel.db" - dbFilePermission = 0600 + dbName = "channel.db" ) var ( @@ -214,46 +212,6 @@ type DB struct { dryRun bool } -// Update is a wrapper around walletdb.Update which calls into the extended -// backend when available. 
This call is needed to be able to cast DB to -// ExtendedBackend. The passed reset function is called before the start of the -// transaction and can be used to reset intermediate state. As callers may -// expect retries of the f closure (depending on the database backend used), the -// reset function will be called before each retry respectively. -func (db *DB) Update(f func(tx walletdb.ReadWriteTx) error, reset func()) error { - if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { - return v.Update(f, reset) - } - - reset() - return walletdb.Update(db, f) -} - -// View is a wrapper around walletdb.View which calls into the extended -// backend when available. This call is needed to be able to cast DB to -// ExtendedBackend. The passed reset function is called before the start of the -// transaction and can be used to reset intermediate state. As callers may -// expect retries of the f closure (depending on the database backend used), the -// reset function will be called before each retry respectively. -func (db *DB) View(f func(tx walletdb.ReadTx) error, reset func()) error { - if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { - return v.View(f, reset) - } - - reset() - return walletdb.View(db, f) -} - -// PrintStats calls into the extended backend if available. This call is needed -// to be able to cast DB to ExtendedBackend. -func (db *DB) PrintStats() string { - if v, ok := db.Backend.(kvdb.ExtendedBackend); ok { - return v.PrintStats() - } - - return "unimplemented" -} - // Open opens or creates channeldb. Any necessary schemas migrations due // to updates will take place as necessary. // TODO(bhandras): deprecate this function. @@ -449,7 +407,7 @@ func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) // stored currently active/open channels associated with the target nodeID. In // the case that no active channels are known to have been created with this // node, then a zero-length slice is returned. 
-func (db *DB) fetchOpenChannels(tx kvdb.RTx, +func (d *DB) fetchOpenChannels(tx kvdb.RTx, nodeID *btcec.PublicKey) ([]*OpenChannel, error) { // Get the bucket dedicated to storing the metadata for open channels. @@ -485,7 +443,7 @@ func (db *DB) fetchOpenChannels(tx kvdb.RTx, // Finally, we both of the necessary buckets retrieved, fetch // all the active channels related to this node. - nodeChannels, err := db.fetchNodeChannels(chainBucket) + nodeChannels, err := d.fetchNodeChannels(chainBucket) if err != nil { return fmt.Errorf("unable to read channel for "+ "chain_hash=%x, node_key=%x: %v", @@ -502,7 +460,7 @@ func (db *DB) fetchOpenChannels(tx kvdb.RTx, // fetchNodeChannels retrieves all active channels from the target chainBucket // which is under a node's dedicated channel bucket. This function is typically // used to fetch all the active channels related to a particular node. -func (db *DB) fetchNodeChannels(chainBucket kvdb.RBucket) ([]*OpenChannel, error) { +func (d *DB) fetchNodeChannels(chainBucket kvdb.RBucket) ([]*OpenChannel, error) { var channels []*OpenChannel @@ -528,7 +486,7 @@ func (db *DB) fetchNodeChannels(chainBucket kvdb.RBucket) ([]*OpenChannel, error return fmt.Errorf("unable to read channel data for "+ "chan_point=%v: %v", outPoint, err) } - oChannel.Db = db + oChannel.Db = d channels = append(channels, oChannel) @@ -990,8 +948,8 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error { // pruneLinkNode determines whether we should garbage collect a link node from // the database due to no longer having any open channels with it. If there are // any left, then this acts as a no-op. 
-func (db *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error { - openChannels, err := db.fetchOpenChannels(tx, remotePub) +func (d *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error { + openChannels, err := d.fetchOpenChannels(tx, remotePub) if err != nil { return fmt.Errorf("unable to fetch open channels for peer %x: "+ "%v", remotePub.SerializeCompressed(), err) @@ -1004,7 +962,7 @@ func (db *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error { log.Infof("Pruning link node %x with zero open channels from database", remotePub.SerializeCompressed()) - return db.deleteLinkNode(tx, remotePub) + return d.deleteLinkNode(tx, remotePub) } // PruneLinkNodes attempts to prune all link nodes found within the databse with @@ -1140,16 +1098,16 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) { // database. If the channel was already removed (has a closed channel entry), // then we'll return a nil error. Otherwise, we'll insert a new close summary // into the database. -func (db *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) error { +func (d *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) error { // With the chanPoint constructed, we'll attempt to find the target // channel in the database. If we can't find the channel, then we'll // return the error back to the caller. - dbChan, err := db.FetchChannel(*chanPoint) + dbChan, err := d.FetchChannel(*chanPoint) switch { // If the channel wasn't found, then it's possible that it was already // abandoned from the database. case err == ErrChannelNotFound: - _, closedErr := db.FetchClosedChannel(chanPoint) + _, closedErr := d.FetchClosedChannel(chanPoint) if closedErr != nil { return closedErr } @@ -1312,9 +1270,9 @@ func fetchHistoricalChanBucket(tx kvdb.RTx, // FetchHistoricalChannel fetches open channel data from the historical channel // bucket. 
-func (db *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) { +func (d *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) { var channel *OpenChannel - err := kvdb.View(db, func(tx kvdb.RTx) error { + err := kvdb.View(d, func(tx kvdb.RTx) error { chanBucket, err := fetchHistoricalChanBucket(tx, outPoint) if err != nil { return err diff --git a/channeldb/graph.go b/channeldb/graph.go index f396739bd4d..678b7ac06c4 100644 --- a/channeldb/graph.go +++ b/channeldb/graph.go @@ -3406,7 +3406,7 @@ func (c *ChannelGraph) MarkEdgeZombie(chanID uint64, c.cacheMu.Lock() defer c.cacheMu.Unlock() - err := kvdb.Batch(c.db, func(tx kvdb.RwTx) error { + err := kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound diff --git a/channeldb/nodes.go b/channeldb/nodes.go index f17662275f5..88d98d6ae0e 100644 --- a/channeldb/nodes.go +++ b/channeldb/nodes.go @@ -61,7 +61,7 @@ type LinkNode struct { // NewLinkNode creates a new LinkNode from the provided parameters, which is // backed by an instance of channeldb. -func (db *DB) NewLinkNode(bitNet wire.BitcoinNet, pub *btcec.PublicKey, +func (d *DB) NewLinkNode(bitNet wire.BitcoinNet, pub *btcec.PublicKey, addrs ...net.Addr) *LinkNode { return &LinkNode{ @@ -69,7 +69,7 @@ func (db *DB) NewLinkNode(bitNet wire.BitcoinNet, pub *btcec.PublicKey, IdentityPub: pub, LastSeen: time.Now(), Addresses: addrs, - db: db, + db: d, } } @@ -129,13 +129,13 @@ func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) error { // DeleteLinkNode removes the link node with the given identity from the // database. 
-func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) error { - return kvdb.Update(db, func(tx kvdb.RwTx) error { - return db.deleteLinkNode(tx, identity) +func (d *DB) DeleteLinkNode(identity *btcec.PublicKey) error { + return kvdb.Update(d, func(tx kvdb.RwTx) error { + return d.deleteLinkNode(tx, identity) }, func() {}) } -func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error { +func (d *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error { nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket) if nodeMetaBucket == nil { return ErrLinkNodesNotFound @@ -148,9 +148,9 @@ func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error { // FetchLinkNode attempts to lookup the data for a LinkNode based on a target // identity public key. If a particular LinkNode for the passed identity public // key cannot be found, then ErrNodeNotFound if returned. -func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) { +func (d *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) { var linkNode *LinkNode - err := kvdb.View(db, func(tx kvdb.RTx) error { + err := kvdb.View(d, func(tx kvdb.RTx) error { node, err := fetchLinkNode(tx, identity) if err != nil { return err @@ -191,10 +191,10 @@ func fetchLinkNode(tx kvdb.RTx, targetPub *btcec.PublicKey) (*LinkNode, error) { // FetchAllLinkNodes starts a new database transaction to fetch all nodes with // whom we have active channels with. 
-func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) { +func (d *DB) FetchAllLinkNodes() ([]*LinkNode, error) { var linkNodes []*LinkNode - err := kvdb.View(db, func(tx kvdb.RTx) error { - nodes, err := db.fetchAllLinkNodes(tx) + err := kvdb.View(d, func(tx kvdb.RTx) error { + nodes, err := d.fetchAllLinkNodes(tx) if err != nil { return err } @@ -213,7 +213,7 @@ func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) { // fetchAllLinkNodes uses an existing database transaction to fetch all nodes // with whom we have active channels with. -func (db *DB) fetchAllLinkNodes(tx kvdb.RTx) ([]*LinkNode, error) { +func (d *DB) fetchAllLinkNodes(tx kvdb.RTx) ([]*LinkNode, error) { nodeMetaBucket := tx.ReadBucket(nodeInfoBucket) if nodeMetaBucket == nil { return nil, ErrLinkNodesNotFound diff --git a/channeldb/payments.go b/channeldb/payments.go index 1abe54a57e4..f044d0f1e5d 100644 --- a/channeldb/payments.go +++ b/channeldb/payments.go @@ -233,10 +233,10 @@ type PaymentCreationInfo struct { // FetchPayments returns all sent payments found in the DB. // // nolint: dupl -func (db *DB) FetchPayments() ([]*MPPayment, error) { +func (d *DB) FetchPayments() ([]*MPPayment, error) { var payments []*MPPayment - err := kvdb.View(db, func(tx kvdb.RTx) error { + err := kvdb.View(d, func(tx kvdb.RTx) error { paymentsBucket := tx.ReadBucket(paymentsRootBucket) if paymentsBucket == nil { return nil @@ -510,10 +510,10 @@ type PaymentsResponse struct { // QueryPayments is a query to the payments database which is restricted // to a subset of payments by the payments query, containing an offset // index and a maximum number of returned payments. -func (db *DB) QueryPayments(query PaymentsQuery) (PaymentsResponse, error) { +func (d *DB) QueryPayments(query PaymentsQuery) (PaymentsResponse, error) { var resp PaymentsResponse - if err := kvdb.View(db, func(tx kvdb.RTx) error { + if err := kvdb.View(d, func(tx kvdb.RTx) error { // Get the root payments bucket. 
paymentsBucket := tx.ReadBucket(paymentsRootBucket) if paymentsBucket == nil { @@ -681,8 +681,8 @@ func fetchPaymentWithSequenceNumber(tx kvdb.RTx, paymentHash lntypes.Hash, // failedOnly is set, only failed payments will be considered for deletion. If // failedHtlsOnly is set, the payment itself won't be deleted, only failed HTLC // attempts. -func (db *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error { - return kvdb.Update(db, func(tx kvdb.RwTx) error { +func (d *DB) DeletePayments(failedOnly, failedHtlcsOnly bool) error { + return kvdb.Update(d, func(tx kvdb.RwTx) error { payments := tx.ReadWriteBucket(paymentsRootBucket) if payments == nil { return nil diff --git a/channeldb/reports.go b/channeldb/reports.go index 5d2892b2885..c94c1933e82 100644 --- a/channeldb/reports.go +++ b/channeldb/reports.go @@ -214,7 +214,7 @@ func (d DB) FetchChannelReports(chainHash chainhash.Hash, var reports []*ResolverReport - if err := kvdb.View(d, func(tx kvdb.RTx) error { + if err := kvdb.View(d.Backend, func(tx kvdb.RTx) error { chanBucket, err := fetchReportReadBucket( tx, chainHash, outPoint, ) diff --git a/config.go b/config.go index d795feed06c..cd01c70ea2b 100644 --- a/config.go +++ b/config.go @@ -203,8 +203,6 @@ var ( bitcoindEstimateModes = [2]string{"ECONOMICAL", defaultBitcoindEstimateMode} defaultPrunedNodeMaxPeers = 4 - - defaultSphinxDbName = "sphinxreplay.db" ) // Config defines the configuration options for lnd. @@ -1228,6 +1226,12 @@ func ValidateConfig(cfg Config, usageMessage string, lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name), ) + // We need to make sure the default network directory exists for when we + // try to create our default macaroons there. + if err := makeDirectory(cfg.networkDir); err != nil { + return nil, err + } + // If a custom macaroon directory wasn't specified and the data // directory has changed from the default path, then we'll also update // the path for the macaroons to be generated. 
@@ -1489,12 +1493,13 @@ func ValidateConfig(cfg Config, usageMessage string, return &cfg, err } -// localDatabaseDir returns the default directory where the -// local bolt db files are stored. -func (c *Config) localDatabaseDir() string { - return filepath.Join(c.DataDir, - defaultGraphSubDirname, - lncfg.NormalizeNetwork(c.ActiveNetParams.Name)) +// graphDatabaseDir returns the default directory where the local bolt graph db +// files are stored. +func (c *Config) graphDatabaseDir() string { + return filepath.Join( + c.DataDir, defaultGraphSubDirname, + lncfg.NormalizeNetwork(c.ActiveNetParams.Name), + ) } // CleanAndExpandPath expands environment variables and leading ~ in the diff --git a/docs/release-notes/release-notes-0.14.0.md b/docs/release-notes/release-notes-0.14.0.md index 3cf1f09c736..80a8c80d68d 100644 --- a/docs/release-notes/release-notes-0.14.0.md +++ b/docs/release-notes/release-notes-0.14.0.md @@ -1,5 +1,17 @@ # Release Notes +# Backend Enhancements & Optimizations + +## Full remote database support + +`lnd` now stores [all its data in the same remote/external +database](https://github.com/lightningnetwork/lnd/pull/5484) such as `etcd` +instead of only the channel state and wallet data. This makes `lnd` fully +stateless and therefore makes switching over to a new leader instance almost +instantaneous. Read the [guide on leader +election](https://github.com/lightningnetwork/lnd/blob/master/docs/leader_election.md) +for more information. + ## RPC Server * [Return payment address and add index from @@ -29,7 +41,7 @@ you. added](https://github.com/lightningnetwork/lnd/pull/5520) to ensure that all PRs ([aside from merge commits](https://github.com/lightningnetwork/lnd/pull/5543)) add an entry in - the release notes folder that at leasts links to PR being added. + the release notes folder that at least links to PR being added. 
* [A new build target itest-race](https://github.com/lightningnetwork/lnd/pull/5542) to help uncover undetected data races with our itests. diff --git a/go.mod b/go.mod index 392a55a78c1..61bbe531988 100644 --- a/go.mod +++ b/go.mod @@ -9,12 +9,11 @@ require ( github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f github.com/btcsuite/btcutil v1.0.3-0.20210527170813-e2ba6805a890 github.com/btcsuite/btcutil/psbt v1.0.3-0.20210527170813-e2ba6805a890 - github.com/btcsuite/btcwallet v0.12.1-0.20210519225359-6ab9b615576f - github.com/btcsuite/btcwallet/wallet/txauthor v1.0.1-0.20210519225359-6ab9b615576f + github.com/btcsuite/btcwallet v0.12.1-0.20210803004036-eebed51155ec + github.com/btcsuite/btcwallet/wallet/txauthor v1.0.2-0.20210803004036-eebed51155ec github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 - github.com/btcsuite/btcwallet/wallet/txsizes v1.0.1-0.20210519225359-6ab9b615576f // indirect - github.com/btcsuite/btcwallet/walletdb v1.3.5 - github.com/btcsuite/btcwallet/wtxmgr v1.3.1-0.20210706234807-aaf03fee735a + github.com/btcsuite/btcwallet/walletdb v1.3.6-0.20210803004036-eebed51155ec + github.com/btcsuite/btcwallet/wtxmgr v1.3.1-0.20210803004036-eebed51155ec github.com/davecgh/go-spew v1.1.1 github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/go-errors/errors v1.0.1 diff --git a/go.sum b/go.sum index 043937f91d9..f9f35e64ac9 100644 --- a/go.sum +++ b/go.sum @@ -85,23 +85,24 @@ github.com/btcsuite/btcutil v1.0.3-0.20210527170813-e2ba6805a890/go.mod h1:0DVlH github.com/btcsuite/btcutil/psbt v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:LVveMu4VaNSkIRTZu2+ut0HDBRuYjqGocxDMNS1KuGQ= github.com/btcsuite/btcutil/psbt v1.0.3-0.20210527170813-e2ba6805a890 h1:0xUNvvwJ7RjzBs4nCF+YrK28S5P/b4uHkpPxY1ovGY4= github.com/btcsuite/btcutil/psbt v1.0.3-0.20210527170813-e2ba6805a890/go.mod h1:LVveMu4VaNSkIRTZu2+ut0HDBRuYjqGocxDMNS1KuGQ= -github.com/btcsuite/btcwallet v0.12.1-0.20210519225359-6ab9b615576f h1:Me6OOQP2ZYttZuViKXHVegXPKz2n42zNbHI3ljPeqwU= 
-github.com/btcsuite/btcwallet v0.12.1-0.20210519225359-6ab9b615576f/go.mod h1:f1HuBGov5+OTp40Gh1vA+tvF6d7bbuLFTceJMRB7fXw= +github.com/btcsuite/btcwallet v0.12.1-0.20210803004036-eebed51155ec h1:MAAR//aKu+I7bnxmWJZqGTX7fU7abWFBRoSzX6ty8zw= +github.com/btcsuite/btcwallet v0.12.1-0.20210803004036-eebed51155ec/go.mod h1:LNhKxGlbwEGVQFjS4Qa7BgR6NipPhTd1/93Ay049pBw= github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= -github.com/btcsuite/btcwallet/wallet/txauthor v1.0.1-0.20210519225359-6ab9b615576f h1:uzCtWqLJ6dlufUhpmoNgaegF87Pb9kOwPmpFYEi2up4= -github.com/btcsuite/btcwallet/wallet/txauthor v1.0.1-0.20210519225359-6ab9b615576f/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= +github.com/btcsuite/btcwallet/wallet/txauthor v1.0.1-0.20210329233242-e0607006dce6/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= +github.com/btcsuite/btcwallet/wallet/txauthor v1.0.2-0.20210803004036-eebed51155ec h1:nuO8goa4gbgDM4iegCztF7mTq8io9NT1DAMoPrEI6S4= +github.com/btcsuite/btcwallet/wallet/txauthor v1.0.2-0.20210803004036-eebed51155ec/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 h1:2VsfS0sBedcM5KmDzRMT3+b6xobqWveZGvjb+jFez5w= github.com/btcsuite/btcwallet/wallet/txrules v1.0.0/go.mod h1:UwQE78yCerZ313EXZwEiu3jNAtfXj2n2+c8RWiE/WNA= github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs= github.com/btcsuite/btcwallet/wallet/txsizes v1.0.1-0.20210519225359-6ab9b615576f h1:bzrmHuQ3ZGWWhGDyTL0OqihQWXGXSXNuBPkDoDB8SS4= github.com/btcsuite/btcwallet/wallet/txsizes v1.0.1-0.20210519225359-6ab9b615576f/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs= github.com/btcsuite/btcwallet/walletdb v1.3.4/go.mod h1:oJDxAEUHVtnmIIBaa22wSBPTVcs6hUp5NKWmI8xDwwU= -github.com/btcsuite/btcwallet/walletdb v1.3.5-0.20210513043850-3a2f12e3a954/go.mod h1:oJDxAEUHVtnmIIBaa22wSBPTVcs6hUp5NKWmI8xDwwU= 
-github.com/btcsuite/btcwallet/walletdb v1.3.5 h1:SoxUPLgJUkyO1XqON6X7x+rjHJoIpRQov8o8X6gNoz8= github.com/btcsuite/btcwallet/walletdb v1.3.5/go.mod h1:oJDxAEUHVtnmIIBaa22wSBPTVcs6hUp5NKWmI8xDwwU= +github.com/btcsuite/btcwallet/walletdb v1.3.6-0.20210803004036-eebed51155ec h1:zcAU3Ij8SmqaE+ITtS76fua2Niq7DRNp46sJRhi8PiI= +github.com/btcsuite/btcwallet/walletdb v1.3.6-0.20210803004036-eebed51155ec/go.mod h1:oJDxAEUHVtnmIIBaa22wSBPTVcs6hUp5NKWmI8xDwwU= github.com/btcsuite/btcwallet/wtxmgr v1.3.0/go.mod h1:awQsh1n/0ZrEQ+JZgWvHeo153ubzEisf/FyNtwI0dDk= -github.com/btcsuite/btcwallet/wtxmgr v1.3.1-0.20210706234807-aaf03fee735a h1:25oMK8eFUTVMyKGHc2xX7pNkU4u208Dpf6IPVh5E+cA= -github.com/btcsuite/btcwallet/wtxmgr v1.3.1-0.20210706234807-aaf03fee735a/go.mod h1:UM38ixX8VwJ9qey4umf//0H3ndn5kSImFZ46V54Nd5Q= +github.com/btcsuite/btcwallet/wtxmgr v1.3.1-0.20210803004036-eebed51155ec h1:q2OVY/GUKpdpfaVYztVrWoTRVzyzdDQftRcgHs/6cXI= +github.com/btcsuite/btcwallet/wtxmgr v1.3.1-0.20210803004036-eebed51155ec/go.mod h1:UM38ixX8VwJ9qey4umf//0H3ndn5kSImFZ46V54Nd5Q= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/golangcrypto v0.0.0-20150304025918-53f62d9b43e8/go.mod h1:tYvUd8KLhm/oXvUeSEs2VlLghFjQt9+ZaF9ghH0JNjc= @@ -613,9 +614,9 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/htlcswitch/decayedlog.go b/htlcswitch/decayedlog.go index 1d7b95d3607..e1e1955f6b1 100644 --- a/htlcswitch/decayedlog.go +++ b/htlcswitch/decayedlog.go @@ -41,6 +41,35 @@ var ( ErrDecayedLogCorrupted = errors.New("decayed log structure corrupted") ) +// NewBoltBackendCreator returns a function that creates a new bbolt backend for +// the decayed logs database. +func NewBoltBackendCreator(dbPath, + dbFileName string) func(boltCfg *kvdb.BoltConfig) (kvdb.Backend, error) { + + return func(boltCfg *kvdb.BoltConfig) (kvdb.Backend, error) { + cfg := &kvdb.BoltBackendConfig{ + DBPath: dbPath, + DBFileName: dbFileName, + NoFreelistSync: !boltCfg.SyncFreelist, + AutoCompact: boltCfg.AutoCompact, + AutoCompactMinAge: boltCfg.AutoCompactMinAge, + DBTimeout: boltCfg.DBTimeout, + } + + // Use default path for log database. + if dbPath == "" { + cfg.DBPath = defaultDbDirectory + } + + db, err := kvdb.GetBoltBackend(cfg) + if err != nil { + return nil, fmt.Errorf("could not open boltdb: %v", err) + } + + return db, nil + } +} + // DecayedLog implements the PersistLog interface. It stores the first // HashPrefixSize bytes of a sha256-hashed shared secret along with a node's // CLTV value. It is a decaying log meaning there will be a garbage collector @@ -51,8 +80,6 @@ type DecayedLog struct { started int32 // To be used atomically. 
stopped int32 // To be used atomically. - cfg *kvdb.BoltBackendConfig - db kvdb.Backend notifier chainntnfs.ChainNotifier @@ -64,25 +91,11 @@ type DecayedLog struct { // NewDecayedLog creates a new DecayedLog, which caches recently seen hash // shared secrets. Entries are evicted as their cltv expires using block epochs // from the given notifier. -func NewDecayedLog(dbPath, dbFileName string, boltCfg *kvdb.BoltConfig, +func NewDecayedLog(db kvdb.Backend, notifier chainntnfs.ChainNotifier) *DecayedLog { - cfg := &kvdb.BoltBackendConfig{ - DBPath: dbPath, - DBFileName: dbFileName, - NoFreelistSync: true, - AutoCompact: boltCfg.AutoCompact, - AutoCompactMinAge: boltCfg.AutoCompactMinAge, - DBTimeout: boltCfg.DBTimeout, - } - - // Use default path for log database - if dbPath == "" { - cfg.DBPath = defaultDbDirectory - } - return &DecayedLog{ - cfg: cfg, + db: db, notifier: notifier, quit: make(chan struct{}), } @@ -96,13 +109,6 @@ func (d *DecayedLog) Start() error { return nil } - // Open the boltdb for use. - var err error - d.db, err = kvdb.GetBoltBackend(d.cfg) - if err != nil { - return fmt.Errorf("could not open boltdb: %v", err) - } - // Initialize the primary buckets used by the decayed log. if err := d.initBuckets(); err != nil { return err diff --git a/htlcswitch/decayedlog_test.go b/htlcswitch/decayedlog_test.go index baac6d9f66e..42e636536be 100644 --- a/htlcswitch/decayedlog_test.go +++ b/htlcswitch/decayedlog_test.go @@ -2,6 +2,7 @@ package htlcswitch import ( "crypto/rand" + "fmt" "io/ioutil" "os" "testing" @@ -18,20 +19,29 @@ const ( ) // tempDecayedLogPath creates a new temporary database path to back a single -// deccayed log instance. -func tempDecayedLogPath(t *testing.T) (string, string) { +// decayed log instance. 
+func tempDecayedLogPath(t *testing.T) string { dir, err := ioutil.TempDir("", "decayedlog") if err != nil { t.Fatalf("unable to create temporary decayed log dir: %v", err) } - return dir, "sphinxreplay.db" + return dir } // startup sets up the DecayedLog and possibly the garbage collector. -func startup(dbPath, dbFileName string, notifier bool) (sphinx.ReplayLog, +func startup(dbPath string, notifier bool) (sphinx.ReplayLog, *mock.ChainNotifier, *sphinx.HashPrefix, error) { + cfg := &kvdb.BoltConfig{ + DBTimeout: time.Second, + } + backend, err := NewBoltBackendCreator(dbPath, "sphinxreplay.db")(cfg) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to create temporary "+ + "decayed log db: %v", err) + } + var log sphinx.ReplayLog var chainNotifier *mock.ChainNotifier if notifier { @@ -44,16 +54,14 @@ func startup(dbPath, dbFileName string, notifier bool) (sphinx.ReplayLog, } // Initialize the DecayedLog object - log = NewDecayedLog( - dbPath, dbFileName, &kvdb.BoltConfig{}, chainNotifier, - ) + log = NewDecayedLog(backend, chainNotifier) } else { // Initialize the DecayedLog object - log = NewDecayedLog(dbPath, dbFileName, &kvdb.BoltConfig{}, nil) + log = NewDecayedLog(backend, nil) } // Open the channeldb (start the garbage collector) - err := log.Start() + err = log.Start() if err != nil { return nil, nil, nil, err } @@ -83,9 +91,9 @@ func shutdown(dir string, d sphinx.ReplayLog) { func TestDecayedLogGarbageCollector(t *testing.T) { t.Parallel() - dbPath, dbFileName := tempDecayedLogPath(t) + dbPath := tempDecayedLogPath(t) - d, notifier, hashedSecret, err := startup(dbPath, dbFileName, true) + d, notifier, hashedSecret, err := startup(dbPath, true) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) } @@ -144,9 +152,9 @@ func TestDecayedLogGarbageCollector(t *testing.T) { func TestDecayedLogPersistentGarbageCollector(t *testing.T) { t.Parallel() - dbPath, dbFileName := tempDecayedLogPath(t) + dbPath := tempDecayedLogPath(t) - d, _, 
hashedSecret, err := startup(dbPath, dbFileName, true) + d, _, hashedSecret, err := startup(dbPath, true) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) } @@ -166,7 +174,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) { // Shut down DecayedLog and the garbage collector along with it. d.Stop() - d2, notifier2, _, err := startup(dbPath, dbFileName, true) + d2, notifier2, _, err := startup(dbPath, true) if err != nil { t.Fatalf("Unable to restart DecayedLog: %v", err) } @@ -200,9 +208,9 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) { func TestDecayedLogInsertionAndDeletion(t *testing.T) { t.Parallel() - dbPath, dbFileName := tempDecayedLogPath(t) + dbPath := tempDecayedLogPath(t) - d, _, hashedSecret, err := startup(dbPath, dbFileName, false) + d, _, hashedSecret, err := startup(dbPath, false) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) } @@ -238,9 +246,9 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) { func TestDecayedLogStartAndStop(t *testing.T) { t.Parallel() - dbPath, dbFileName := tempDecayedLogPath(t) + dbPath := tempDecayedLogPath(t) - d, _, hashedSecret, err := startup(dbPath, dbFileName, false) + d, _, hashedSecret, err := startup(dbPath, false) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) } @@ -255,7 +263,7 @@ func TestDecayedLogStartAndStop(t *testing.T) { // Shutdown the DecayedLog's channeldb d.Stop() - d2, _, hashedSecret2, err := startup(dbPath, dbFileName, false) + d2, _, hashedSecret2, err := startup(dbPath, false) if err != nil { t.Fatalf("Unable to restart DecayedLog: %v", err) } @@ -282,7 +290,7 @@ func TestDecayedLogStartAndStop(t *testing.T) { // Shutdown the DecayedLog's channeldb d2.Stop() - d3, _, hashedSecret3, err := startup(dbPath, dbFileName, false) + d3, _, hashedSecret3, err := startup(dbPath, false) if err != nil { t.Fatalf("Unable to restart DecayedLog: %v", err) } @@ -304,9 +312,9 @@ func TestDecayedLogStartAndStop(t 
*testing.T) { func TestDecayedLogStorageAndRetrieval(t *testing.T) { t.Parallel() - dbPath, dbFileName := tempDecayedLogPath(t) + dbPath := tempDecayedLogPath(t) - d, _, hashedSecret, err := startup(dbPath, dbFileName, false) + d, _, hashedSecret, err := startup(dbPath, false) if err != nil { t.Fatalf("Unable to start up DecayedLog: %v", err) } diff --git a/kvdb/etcd/config.go b/kvdb/etcd/config.go index 4d6e9f85a87..5ae91c22098 100644 --- a/kvdb/etcd/config.go +++ b/kvdb/etcd/config.go @@ -1,5 +1,7 @@ package etcd +import "fmt" + // Config holds etcd configuration alongside with configuration related to our higher level interface. type Config struct { Embedded bool `long:"embedded" description:"Use embedded etcd instance instead of the external one. Note: use for testing only."` @@ -30,3 +32,51 @@ type Config struct { // single writer to the database at a time. SingleWriter bool } + +// CloneWithSubNamespace clones the current configuration and returns a new +// instance with the given sub namespace applied by appending it to the main +// namespace. +func (c *Config) CloneWithSubNamespace(subNamespace string) *Config { + ns := c.Namespace + if len(ns) == 0 { + ns = subNamespace + } else { + ns = fmt.Sprintf("%s/%s", ns, subNamespace) + } + + return &Config{ + Embedded: c.Embedded, + EmbeddedClientPort: c.EmbeddedClientPort, + EmbeddedPeerPort: c.EmbeddedPeerPort, + Host: c.Host, + User: c.User, + Pass: c.Pass, + Namespace: ns, + DisableTLS: c.DisableTLS, + CertFile: c.CertFile, + KeyFile: c.KeyFile, + InsecureSkipVerify: c.InsecureSkipVerify, + CollectStats: c.CollectStats, + SingleWriter: c.SingleWriter, + } +} + +// CloneWithSingleWriter clones the current configuration and returns a new +// instance with the single writer property set to true. 
+func (c *Config) CloneWithSingleWriter() *Config { + return &Config{ + Embedded: c.Embedded, + EmbeddedClientPort: c.EmbeddedClientPort, + EmbeddedPeerPort: c.EmbeddedPeerPort, + Host: c.Host, + User: c.User, + Pass: c.Pass, + Namespace: c.Namespace, + DisableTLS: c.DisableTLS, + CertFile: c.CertFile, + KeyFile: c.KeyFile, + InsecureSkipVerify: c.InsecureSkipVerify, + CollectStats: c.CollectStats, + SingleWriter: true, + } +} diff --git a/kvdb/etcd/db.go b/kvdb/etcd/db.go index 3d5b9cc7fa4..e3bad3b250d 100644 --- a/kvdb/etcd/db.go +++ b/kvdb/etcd/db.go @@ -132,7 +132,7 @@ type db struct { var _ walletdb.DB = (*db)(nil) // newEtcdBackend returns a db object initialized with the passed backend -// config. If etcd connection cannot be estabished, then returns error. +// config. If etcd connection cannot be established, then returns error. func newEtcdBackend(ctx context.Context, cfg Config) (*db, error) { clientCfg := clientv3.Config{ Context: ctx, @@ -182,7 +182,7 @@ func newEtcdBackend(ctx context.Context, cfg Config) (*db, error) { return backend, nil } -// getSTMOptions creats all STM options based on the backend config. +// getSTMOptions creates all STM options based on the backend config. 
func (db *db) getSTMOptions() []STMOptionFunc { opts := []STMOptionFunc{ WithAbortContext(db.ctx), diff --git a/kvdb/etcd/db_test.go b/kvdb/etcd/db_test.go index 357c4ed5cb2..ebb044f4292 100644 --- a/kvdb/etcd/db_test.go +++ b/kvdb/etcd/db_test.go @@ -38,7 +38,7 @@ func TestCopy(t *testing.T) { require.Nil(t, err) expected := map[string]string{ - BucketKey("apple"): BucketVal("apple"), + BucketKey("apple"): BucketVal("apple"), ValueKey("key", "apple"): "val", } require.Equal(t, expected, f.Dump()) diff --git a/kvdb/etcd/fixture.go b/kvdb/etcd/fixture.go index b6404a9f612..8fcfb9f5c73 100644 --- a/kvdb/etcd/fixture.go +++ b/kvdb/etcd/fixture.go @@ -11,7 +11,7 @@ import ( "github.com/btcsuite/btcwallet/walletdb" "github.com/stretchr/testify/require" - "go.etcd.io/etcd/client/v3" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" ) diff --git a/kvdb/etcd/readwrite_tx_test.go b/kvdb/etcd/readwrite_tx_test.go index c640493eeac..7c0c296debf 100644 --- a/kvdb/etcd/readwrite_tx_test.go +++ b/kvdb/etcd/readwrite_tx_test.go @@ -85,7 +85,7 @@ func TestChangeDuringUpdate(t *testing.T) { require.Equal(t, count, 2) expected := map[string]string{ - BucketKey("apple"): BucketVal("apple"), + BucketKey("apple"): BucketVal("apple"), ValueKey("key", "apple"): "value", ValueKey("key2", "apple"): "value2", } diff --git a/kvdb/go.mod b/kvdb/go.mod index 42f88ac0063..c50152142e7 100644 --- a/kvdb/go.mod +++ b/kvdb/go.mod @@ -2,7 +2,7 @@ module github.com/lightningnetwork/lnd/kvdb require ( github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f - github.com/btcsuite/btcwallet/walletdb v1.3.5-0.20210513043850-3a2f12e3a954 + github.com/btcsuite/btcwallet/walletdb v1.3.6-0.20210803004036-eebed51155ec github.com/lightningnetwork/lnd/healthcheck v1.0.0 github.com/stretchr/testify v1.7.0 go.etcd.io/bbolt v1.3.6 diff --git a/kvdb/go.sum b/kvdb/go.sum index 20502f7b04e..9cdaa065006 100644 --- a/kvdb/go.sum +++ b/kvdb/go.sum @@ -35,8 +35,8 @@ 
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcwallet/walletdb v1.3.5-0.20210513043850-3a2f12e3a954 h1:CB6chiHPhZWmbCL7kFCADDf15V6I3EUNDgGC25jbptc= -github.com/btcsuite/btcwallet/walletdb v1.3.5-0.20210513043850-3a2f12e3a954/go.mod h1:oJDxAEUHVtnmIIBaa22wSBPTVcs6hUp5NKWmI8xDwwU= +github.com/btcsuite/btcwallet/walletdb v1.3.6-0.20210803004036-eebed51155ec h1:zcAU3Ij8SmqaE+ITtS76fua2Niq7DRNp46sJRhi8PiI= +github.com/btcsuite/btcwallet/walletdb v1.3.6-0.20210803004036-eebed51155ec/go.mod h1:oJDxAEUHVtnmIIBaa22wSBPTVcs6hUp5NKWmI8xDwwU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= diff --git a/kvdb/interface.go b/kvdb/interface.go index a54a94d82d5..87593b67844 100644 --- a/kvdb/interface.go +++ b/kvdb/interface.go @@ -13,12 +13,7 @@ import ( // database backend used), the reset function will be called before each retry // respectively. 
func Update(db Backend, f func(tx RwTx) error, reset func()) error { - if extendedDB, ok := db.(ExtendedBackend); ok { - return extendedDB.Update(f, reset) - } - - reset() - return walletdb.Update(db, f) + return db.Update(f, reset) } // View opens a database read transaction and executes the function f with the @@ -29,15 +24,7 @@ func Update(db Backend, f func(tx RwTx) error, reset func()) error { // expect retries of the f closure (depending on the database backend used), the // reset function will be called before each retry respectively. func View(db Backend, f func(tx RTx) error, reset func()) error { - if extendedDB, ok := db.(ExtendedBackend); ok { - return extendedDB.View(f, reset) - } - - // Since we know that walletdb simply calls into bbolt which never - // retries transactions, we'll call the reset function here before View. - reset() - - return walletdb.View(db, f) + return db.View(f, reset) } // Batch is identical to the Update call, but it attempts to combine several @@ -46,10 +33,12 @@ func View(db Backend, f func(tx RTx) error, reset func()) error { // Batch. For etcd Batch simply does an Update since combination is more complex // in that case due to STM retries. func Batch(db Backend, f func(tx RwTx) error) error { - if extendedDB, ok := db.(ExtendedBackend); ok { + // Fall back to the normal Update method if the backend doesn't support + // batching. + if _, ok := db.(walletdb.BatchDB); !ok { // Since Batch calls handle external state reset, we can safely // pass in an empty reset closure. - return extendedDB.Update(f, func() {}) + return db.Update(f, func() {}) } return walletdb.Batch(db, f) @@ -66,35 +55,6 @@ var Create = walletdb.Create // through read or read+write transactions. type Backend = walletdb.DB -// ExtendedBackend is and interface that supports View and Update and also able -// to collect database access patterns. 
-type ExtendedBackend interface { - Backend - - // PrintStats returns all collected stats pretty printed into a string. - PrintStats() string - - // View opens a database read transaction and executes the function f - // with the transaction passed as a parameter. After f exits, the - // transaction is rolled back. If f errors, its error is returned, not a - // rollback error (if any occur). The passed reset function is called - // before the start of the transaction and can be used to reset - // intermediate state. As callers may expect retries of the f closure - // (depending on the database backend used), the reset function will be - //called before each retry respectively. - View(f func(tx walletdb.ReadTx) error, reset func()) error - - // Update opens a database read/write transaction and executes the - // function f with the transaction passed as a parameter. After f exits, - // if f did not error, the transaction is committed. Otherwise, if f did - // error, the transaction is rolled back. If the rollback fails, the - // original error returned by f is still returned. If the commit fails, - // the commit error is returned. As callers may expect retries of the f - // closure (depending on the database backend used), the reset function - // will be called before each retry respectively. - Update(f func(tx walletdb.ReadWriteTx) error, reset func()) error -} - // Open opens an existing database for the specified type. The arguments are // specific to the database type driver. See the documentation for the database // driver for further details. @@ -129,9 +89,8 @@ type RwBucket = walletdb.ReadWriteBucket // operations. type RwCursor = walletdb.ReadWriteCursor -// ReadWriteTx represents a database transaction that can be used for both -// reads and writes. When only reads are necessary, consider using a RTx -// instead. +// RwTx represents a database transaction that can be used for both reads and +// writes. 
When only reads are necessary, consider using a RTx instead. type RwTx = walletdb.ReadWriteTx var ( diff --git a/lncfg/db.go b/lncfg/db.go index 8e9ebaf80f1..63023c91874 100644 --- a/lncfg/db.go +++ b/lncfg/db.go @@ -7,13 +7,41 @@ import ( "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/kvdb/etcd" + "github.com/lightningnetwork/lnd/lnwallet/btcwallet" ) const ( - dbName = "channel.db" + channelDBName = "channel.db" + macaroonDBName = "macaroons.db" + decayedLogDbName = "sphinxreplay.db" + towerClientDBName = "wtclient.db" + towerServerDBName = "watchtower.db" + BoltBackend = "bolt" EtcdBackend = "etcd" DefaultBatchCommitInterval = 500 * time.Millisecond + + // NSChannelDB is the namespace name that we use for the combined graph + // and channel state DB. + NSChannelDB = "channeldb" + + // NSMacaroonDB is the namespace name that we use for the macaroon DB. + NSMacaroonDB = "macaroondb" + + // NSDecayedLogDB is the namespace name that we use for the sphinx + // replay a.k.a. decayed log DB. + NSDecayedLogDB = "decayedlogdb" + + // NSTowerClientDB is the namespace name that we use for the watchtower + // client DB. + NSTowerClientDB = "towerclientdb" + + // NSTowerServerDB is the namespace name that we use for the watchtower + // server DB. + NSTowerServerDB = "towerserverdb" + + // NSWalletDB is the namespace name that we use for the wallet DB. + NSWalletDB = "walletdb" ) // DB holds database configuration for LND. @@ -27,7 +55,7 @@ type DB struct { Bolt *kvdb.BoltConfig `group:"bolt" namespace:"bolt" description:"Bolt settings."` } -// NewDB creates and returns a new default DB config. +// DefaultDB creates and returns a new default DB config. func DefaultDB() *DB { return &DB{ Backend: BoltBackend, @@ -79,54 +107,267 @@ func (db *DB) Init(ctx context.Context, dbPath string) error { } // DatabaseBackends is a two-tuple that holds the set of active database -// backends for the daemon. 
The two backends we expose are the local database -// backend, and the remote backend. The LocalDB attribute will always be -// populated. However, the remote DB will only be set if a replicated database -// is active. +// backends for the daemon. The two backends we expose are the graph database +// backend, and the channel state backend. type DatabaseBackends struct { - // LocalDB points to the local non-replicated backend. - LocalDB kvdb.Backend + // GraphDB points to the database backend that contains the less + // critical data that is accessed often, such as the channel graph and + // chain height hints. + GraphDB kvdb.Backend + + // ChanStateDB points to a possibly networked replicated backend that + // contains the critical channel state related data. + ChanStateDB kvdb.Backend + + // HeightHintDB points to a possibly networked replicated backend that + // contains the chain height hint related data. + HeightHintDB kvdb.Backend - // RemoteDB points to a possibly networked replicated backend. If no - // replicated backend is active, then this pointer will be nil. - RemoteDB kvdb.Backend + // MacaroonDB points to a database backend that stores the macaroon root + // keys. + MacaroonDB kvdb.Backend + + // DecayedLogDB points to a database backend that stores the decayed log + // data. + DecayedLogDB kvdb.Backend + + // TowerClientDB points to a database backend that stores the watchtower + // client data. This might be nil if the watchtower client is disabled. + TowerClientDB kvdb.Backend + + // TowerServerDB points to a database backend that stores the watchtower + // server data. This might be nil if the watchtower server is disabled. + TowerServerDB kvdb.Backend + + // WalletDB is an option that instructs the wallet loader where to load + // the underlying wallet database from. + WalletDB btcwallet.LoaderOption + + // Remote indicates whether the database backends are remote, possibly + // replicated instances or local bbolt backed databases. 
+ Remote bool + + // CloseFuncs is a map of close functions for each of the initialized + // DB backends keyed by their namespace name. + CloseFuncs map[string]func() error } -// GetBackends returns a set of kvdb.Backends as set in the DB config. The -// local database will ALWAYS be non-nil, while the remote database will only -// be populated if etcd is specified. -func (db *DB) GetBackends(ctx context.Context, dbPath string) ( - *DatabaseBackends, error) { +// GetBackends returns a set of kvdb.Backends as set in the DB config. +func (db *DB) GetBackends(ctx context.Context, chanDBPath, + walletDBPath, towerServerDBPath string, towerClientEnabled, + towerServerEnabled bool) (*DatabaseBackends, error) { - var ( - localDB, remoteDB kvdb.Backend - err error - ) + // We keep track of all the kvdb backends we actually open and return a + // reference to their close function so they can be cleaned up properly + // on error or shutdown. + closeFuncs := make(map[string]func() error) + + // If we need to return early because of an error, we invoke any close + // function that has been initialized so far. + returnEarly := true + defer func() { + if !returnEarly { + return + } + + for _, closeFunc := range closeFuncs { + _ = closeFunc() + } + }() if db.Backend == EtcdBackend { - remoteDB, err = kvdb.Open( - kvdb.EtcdBackendName, ctx, db.Etcd, + // As long as the graph data, channel state and height hint + // cache are all still in the channel.db file in bolt, we + // replicate the same behavior here and use the same etcd + // backend for those three sub DBs. But we namespace it properly + // to make such a split even easier in the future. This will + // break lnd for users that ran on etcd with 0.13.x since that + // code used the root namespace. We assume that nobody used etcd + // for mainnet just yet since that feature was clearly marked as + // experimental in 0.13.x. 
+ etcdBackend, err := kvdb.Open( + kvdb.EtcdBackendName, ctx, + db.Etcd.CloneWithSubNamespace(NSChannelDB), + ) + if err != nil { + return nil, fmt.Errorf("error opening etcd DB: %v", err) + } + closeFuncs[NSChannelDB] = etcdBackend.Close + + etcdMacaroonBackend, err := kvdb.Open( + kvdb.EtcdBackendName, ctx, + db.Etcd.CloneWithSubNamespace(NSMacaroonDB), + ) + if err != nil { + return nil, fmt.Errorf("error opening etcd macaroon "+ + "DB: %v", err) + } + closeFuncs[NSMacaroonDB] = etcdMacaroonBackend.Close + + etcdDecayedLogBackend, err := kvdb.Open( + kvdb.EtcdBackendName, ctx, + db.Etcd.CloneWithSubNamespace(NSDecayedLogDB), + ) + if err != nil { + return nil, fmt.Errorf("error opening etcd decayed "+ + "log DB: %v", err) + } + closeFuncs[NSDecayedLogDB] = etcdDecayedLogBackend.Close + + etcdTowerClientBackend, err := kvdb.Open( + kvdb.EtcdBackendName, ctx, + db.Etcd.CloneWithSubNamespace(NSTowerClientDB), + ) + if err != nil { + return nil, fmt.Errorf("error opening etcd tower "+ + "client DB: %v", err) + } + closeFuncs[NSTowerClientDB] = etcdTowerClientBackend.Close + + etcdTowerServerBackend, err := kvdb.Open( + kvdb.EtcdBackendName, ctx, + db.Etcd.CloneWithSubNamespace(NSTowerServerDB), ) if err != nil { - return nil, err + return nil, fmt.Errorf("error opening etcd tower "+ + "server DB: %v", err) } + closeFuncs[NSTowerServerDB] = etcdTowerServerBackend.Close + + etcdWalletBackend, err := kvdb.Open( + kvdb.EtcdBackendName, ctx, + db.Etcd. + CloneWithSubNamespace(NSWalletDB). 
+ CloneWithSingleWriter(), + ) + if err != nil { + return nil, fmt.Errorf("error opening etcd wallet "+ + "DB: %v", err) + } + closeFuncs[NSWalletDB] = etcdWalletBackend.Close + + returnEarly = false + return &DatabaseBackends{ + GraphDB: etcdBackend, + ChanStateDB: etcdBackend, + HeightHintDB: etcdBackend, + MacaroonDB: etcdMacaroonBackend, + DecayedLogDB: etcdDecayedLogBackend, + TowerClientDB: etcdTowerClientBackend, + TowerServerDB: etcdTowerServerBackend, + // The wallet loader will attempt to use/create the + // wallet in the replicated remote DB if we're running + // in a clustered environment. This will ensure that all + // members of the cluster have access to the same wallet + // state. + WalletDB: btcwallet.LoaderWithExternalWalletDB( + etcdWalletBackend, + ), + Remote: true, + CloseFuncs: closeFuncs, + }, nil } - localDB, err = kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{ - DBPath: dbPath, - DBFileName: dbName, + // We're using all bbolt based databases by default. + boltBackend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{ + DBPath: chanDBPath, + DBFileName: channelDBName, DBTimeout: db.Bolt.DBTimeout, NoFreelistSync: !db.Bolt.SyncFreelist, AutoCompact: db.Bolt.AutoCompact, AutoCompactMinAge: db.Bolt.AutoCompactMinAge, }) if err != nil { - return nil, err + return nil, fmt.Errorf("error opening bolt DB: %v", err) + } + closeFuncs[NSChannelDB] = boltBackend.Close + + macaroonBackend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{ + DBPath: walletDBPath, + DBFileName: macaroonDBName, + DBTimeout: db.Bolt.DBTimeout, + NoFreelistSync: !db.Bolt.SyncFreelist, + AutoCompact: db.Bolt.AutoCompact, + AutoCompactMinAge: db.Bolt.AutoCompactMinAge, + }) + if err != nil { + return nil, fmt.Errorf("error opening macaroon DB: %v", err) + } + closeFuncs[NSMacaroonDB] = macaroonBackend.Close + + decayedLogBackend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{ + DBPath: chanDBPath, + DBFileName: decayedLogDbName, + DBTimeout: db.Bolt.DBTimeout, + 
NoFreelistSync: !db.Bolt.SyncFreelist, + AutoCompact: db.Bolt.AutoCompact, + AutoCompactMinAge: db.Bolt.AutoCompactMinAge, + }) + if err != nil { + return nil, fmt.Errorf("error opening decayed log DB: %v", err) + } + closeFuncs[NSDecayedLogDB] = decayedLogBackend.Close + + // The tower client is optional and might not be enabled by the user. We + // handle it being nil properly in the main server. + var towerClientBackend kvdb.Backend + if towerClientEnabled { + towerClientBackend, err = kvdb.GetBoltBackend( + &kvdb.BoltBackendConfig{ + DBPath: chanDBPath, + DBFileName: towerClientDBName, + DBTimeout: db.Bolt.DBTimeout, + NoFreelistSync: !db.Bolt.SyncFreelist, + AutoCompact: db.Bolt.AutoCompact, + AutoCompactMinAge: db.Bolt.AutoCompactMinAge, + }, + ) + if err != nil { + return nil, fmt.Errorf("error opening tower client "+ + "DB: %v", err) + } + closeFuncs[NSTowerClientDB] = towerClientBackend.Close + } + + // The tower server is optional and might not be enabled by the user. We + // handle it being nil properly in the main server. 
+ var towerServerBackend kvdb.Backend + if towerServerEnabled { + towerServerBackend, err = kvdb.GetBoltBackend( + &kvdb.BoltBackendConfig{ + DBPath: towerServerDBPath, + DBFileName: towerServerDBName, + DBTimeout: db.Bolt.DBTimeout, + NoFreelistSync: !db.Bolt.SyncFreelist, + AutoCompact: db.Bolt.AutoCompact, + AutoCompactMinAge: db.Bolt.AutoCompactMinAge, + }, + ) + if err != nil { + return nil, fmt.Errorf("error opening tower server "+ + "DB: %v", err) + } + closeFuncs[NSTowerServerDB] = towerServerBackend.Close } + returnEarly = false return &DatabaseBackends{ - LocalDB: localDB, - RemoteDB: remoteDB, + GraphDB: boltBackend, + ChanStateDB: boltBackend, + HeightHintDB: boltBackend, + MacaroonDB: macaroonBackend, + DecayedLogDB: decayedLogBackend, + TowerClientDB: towerClientBackend, + TowerServerDB: towerServerBackend, + // When "running locally", LND will use the bbolt wallet.db to + // store the wallet located in the chain data dir, parametrized + // by the active network. The wallet loader has its own cleanup + // method so we don't need to add anything to our map (in fact + // nothing is opened just yet). 
+ WalletDB: btcwallet.LoaderWithLocalWalletDB( + walletDBPath, !db.Bolt.SyncFreelist, db.Bolt.DBTimeout, + ), + CloseFuncs: closeFuncs, }, nil } diff --git a/lnd.go b/lnd.go index d883c92cc04..d730fe0fe4c 100644 --- a/lnd.go +++ b/lnd.go @@ -43,6 +43,7 @@ import ( "github.com/lightningnetwork/lnd/chanacceptor" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet" @@ -54,6 +55,7 @@ import ( "github.com/lightningnetwork/lnd/tor" "github.com/lightningnetwork/lnd/walletunlocker" "github.com/lightningnetwork/lnd/watchtower" + "github.com/lightningnetwork/lnd/watchtower/wtclient" "github.com/lightningnetwork/lnd/watchtower/wtdb" ) @@ -258,7 +260,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error // Run configuration dependent DB pre-initialization. Note that this // needs to be done early and once during the startup process, before // any DB access. - if err := cfg.DB.Init(ctx, cfg.localDatabaseDir()); err != nil { + if err := cfg.DB.Init(ctx, cfg.graphDatabaseDir()); err != nil { return err } @@ -449,7 +451,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error ltndLog.Infof("Elected as leader (%v)", cfg.Cluster.ID) } - localChanDB, remoteChanDB, cleanUp, err := initializeDatabases(ctx, cfg) + dbs, cleanUp, err := initializeDatabases(ctx, cfg) switch { case err == channeldb.ErrDryRunMigrationOK: ltndLog.Infof("%v, exiting", err) @@ -460,33 +462,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error defer cleanUp() - var loaderOpt btcwallet.LoaderOption - if cfg.Cluster.EnableLeaderElection { - // The wallet loader will attempt to use/create the wallet in - // the replicated remote DB if we're running in a clustered - // environment. 
This will ensure that all members of the cluster - // have access to the same wallet state. - loaderOpt = btcwallet.LoaderWithExternalWalletDB( - remoteChanDB.Backend, - ) - } else { - // When "running locally", LND will use the bbolt wallet.db to - // store the wallet located in the chain data dir, parametrized - // by the active network. - chainConfig := cfg.Bitcoin - if cfg.registeredChains.PrimaryChain() == chainreg.LitecoinChain { - chainConfig = cfg.Litecoin - } - - dbDirPath := btcwallet.NetworkDir( - chainConfig.ChainDir, cfg.ActiveNetParams.Params, - ) - loaderOpt = btcwallet.LoaderWithLocalWalletDB( - dbDirPath, !cfg.SyncFreelist, cfg.DB.Bolt.DBTimeout, - ) - } - - pwService.SetLoaderOpts([]btcwallet.LoaderOption{loaderOpt}) + pwService.SetLoaderOpts([]btcwallet.LoaderOption{dbs.walletDB}) + pwService.SetMacaroonDB(dbs.macaroonDB) walletExists, err := pwService.WalletExists() if err != nil { return err @@ -561,7 +538,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error // over RPC. default: params, err := waitForWalletPassword( - cfg, pwService, []btcwallet.LoaderOption{loaderOpt}, + cfg, pwService, []btcwallet.LoaderOption{dbs.walletDB}, interceptor.ShutdownChannel(), ) if err != nil { @@ -591,8 +568,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error if !cfg.NoMacaroons { // Create the macaroon authentication/authorization service. 
macaroonService, err = macaroons.NewService( - cfg.networkDir, "lnd", walletInitParams.StatelessInit, - cfg.DB.Bolt.DBTimeout, macaroons.IPLockChecker, + dbs.macaroonDB, "lnd", walletInitParams.StatelessInit, + macaroons.IPLockChecker, ) if err != nil { err := fmt.Errorf("unable to set up macaroon "+ @@ -706,8 +683,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error LitecoindMode: cfg.LitecoindMode, BtcdMode: cfg.BtcdMode, LtcdMode: cfg.LtcdMode, - LocalChanDB: localChanDB, - RemoteChanDB: remoteChanDB, + HeightHintDB: dbs.heightHintDB, + ChanStateDB: dbs.chanStateDB, PrivateWalletPw: privateWalletPw, PublicWalletPw: publicWalletPw, Birthday: walletInitParams.Birthday, @@ -720,9 +697,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error return cfg.net.Dial("tcp", addr, cfg.ConnectionTimeout) }, BlockCacheSize: cfg.BlockCacheSize, - LoaderOptions: []btcwallet.LoaderOption{ - loaderOpt, - }, + LoaderOptions: []btcwallet.LoaderOption{dbs.walletDB}, } // Parse coin selection strategy. @@ -775,23 +750,6 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error "is proxying over Tor as well", cfg.Tor.StreamIsolation) } - // If the watchtower client should be active, open the client database. - // This is done here so that Close always executes when lndMain returns. - var towerClientDB *wtdb.ClientDB - if cfg.WtClient.Active { - var err error - towerClientDB, err = wtdb.OpenClientDB( - cfg.localDatabaseDir(), cfg.DB.Bolt.DBTimeout, - ) - if err != nil { - err := fmt.Errorf("unable to open watchtower client "+ - "database: %v", err) - ltndLog.Error(err) - return err - } - defer towerClientDB.Close() - } - // If tor is active and either v2 or v3 onion services have been specified, // make a tor controller and pass it into both the watchtower server and // the regular lnd server. 
@@ -816,24 +774,6 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error var tower *watchtower.Standalone if cfg.Watchtower.Active { - // Segment the watchtower directory by chain and network. - towerDBDir := filepath.Join( - cfg.Watchtower.TowerDir, - cfg.registeredChains.PrimaryChain().String(), - lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name), - ) - - towerDB, err := wtdb.OpenTowerDB( - towerDBDir, cfg.DB.Bolt.DBTimeout, - ) - if err != nil { - err := fmt.Errorf("unable to open watchtower "+ - "database: %v", err) - ltndLog.Error(err) - return err - } - defer towerDB.Close() - towerKeyDesc, err := activeChainControl.KeyRing.DeriveKey( keychain.KeyLocator{ Family: keychain.KeyFamilyTowerID, @@ -848,7 +788,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error wtCfg := &watchtower.Config{ BlockFetcher: activeChainControl.ChainIO, - DB: towerDB, + DB: dbs.towerServerDB, EpochRegistrar: activeChainControl.ChainNotifier, Net: cfg.net, NewAddress: func() (btcutil.Address, error) { @@ -900,9 +840,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, interceptor signal.Interceptor) error // Set up the core server which will listen for incoming peer // connections. server, err := newServer( - cfg, cfg.Listeners, localChanDB, remoteChanDB, towerClientDB, - activeChainControl, &idKeyDesc, walletInitParams.ChansToRestore, - chainedAcceptor, torController, + cfg, cfg.Listeners, dbs, activeChainControl, &idKeyDesc, + walletInitParams.ChansToRestore, chainedAcceptor, torController, ) if err != nil { err := fmt.Errorf("unable to create server: %v", err) @@ -1333,11 +1272,6 @@ type WalletUnlockParams struct { // createWalletUnlockerService creates a WalletUnlockerService from the passed // config. 
func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService { - chainConfig := cfg.Bitcoin - if cfg.registeredChains.PrimaryChain() == chainreg.LitecoinChain { - chainConfig = cfg.Litecoin - } - // The macaroonFiles are passed to the wallet unlocker so they can be // deleted and recreated in case the root macaroon key is also changed // during the change password operation. @@ -1346,8 +1280,7 @@ func createWalletUnlockerService(cfg *Config) *walletunlocker.UnlockerService { } return walletunlocker.New( - chainConfig.ChainDir, cfg.ActiveNetParams.Params, - !cfg.SyncFreelist, macaroonFiles, cfg.DB.Bolt.DBTimeout, + cfg.ActiveNetParams.Params, macaroonFiles, cfg.ResetWalletTransactions, nil, ) } @@ -1621,14 +1554,24 @@ func waitForWalletPassword(cfg *Config, } } +// databaseInstances is a struct that holds all instances to the actual +// databases that are used in lnd. +type databaseInstances struct { + graphDB *channeldb.DB + chanStateDB *channeldb.DB + heightHintDB kvdb.Backend + macaroonDB kvdb.Backend + decayedLogDB kvdb.Backend + towerClientDB wtclient.DB + towerServerDB watchtower.DB + walletDB btcwallet.LoaderOption +} + // initializeDatabases extracts the current databases that we'll use for normal -// operation in the daemon. Two databases are returned: one remote and one -// local. However, only if the replicated database is active will the remote -// database point to a unique database. Otherwise, the local and remote DB will -// both point to the same local database. A function closure that closes all -// opened databases is also returned. +// operation in the daemon. A function closure that closes all opened databases +// is also returned. 
func initializeDatabases(ctx context.Context, - cfg *Config) (*channeldb.DB, *channeldb.DB, func(), error) { + cfg *Config) (*databaseInstances, func(), error) { ltndLog.Infof("Opening the main database, this might take a few " + "minutes...") @@ -1641,108 +1584,118 @@ func initializeDatabases(ctx context.Context, startOpenTime := time.Now() - databaseBackends, err := cfg.DB.GetBackends(ctx, cfg.localDatabaseDir()) + databaseBackends, err := cfg.DB.GetBackends( + ctx, cfg.graphDatabaseDir(), cfg.networkDir, filepath.Join( + cfg.Watchtower.TowerDir, + cfg.registeredChains.PrimaryChain().String(), + lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name), + ), cfg.WtClient.Active, cfg.Watchtower.Active, + ) if err != nil { - return nil, nil, nil, fmt.Errorf("unable to obtain database "+ + return nil, nil, fmt.Errorf("unable to obtain database "+ "backends: %v", err) } - // If the remoteDB is nil, then we'll just open a local DB as normal, - // having the remote and local pointer be the exact same instance. - var ( - localChanDB, remoteChanDB *channeldb.DB - closeFuncs []func() + // With the full remote mode we made sure both the graph and channel + // state DB point to the same local or remote DB and the same namespace + // within that DB. + dbs := &databaseInstances{ + heightHintDB: databaseBackends.HeightHintDB, + macaroonDB: databaseBackends.MacaroonDB, + decayedLogDB: databaseBackends.DecayedLogDB, + walletDB: databaseBackends.WalletDB, + } + cleanUp := func() { + // We can just close the returned close functions directly. Even + // if we decorate the channel DB with an additional struct, its + // close function still just points to the kvdb backend. + for name, closeFunc := range databaseBackends.CloseFuncs { + if err := closeFunc(); err != nil { + ltndLog.Errorf("Error closing %s "+ + "database: %v", name, err) + } + } + } + if databaseBackends.Remote { + ltndLog.Infof("Using remote %v database! 
Creating "+ + "graph and channel state DB instances", cfg.DB.Backend) + } else { + ltndLog.Infof("Creating local graph and channel state DB " + + "instances") + } + + // Otherwise, we'll open two instances, one for the state we only need + // locally, and the other for things we want to ensure are replicated. + dbs.graphDB, err = channeldb.CreateWithBackend( + databaseBackends.GraphDB, + channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), + channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), + channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval), + channeldb.OptionDryRunMigration(cfg.DryRunMigration), ) - if databaseBackends.RemoteDB == nil { - // Open the channeldb, which is dedicated to storing channel, - // and network related metadata. - localChanDB, err = channeldb.CreateWithBackend( - databaseBackends.LocalDB, - channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), - channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), - channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval), - channeldb.OptionDryRunMigration(cfg.DryRunMigration), - ) - switch { - case err == channeldb.ErrDryRunMigrationOK: - return nil, nil, nil, err + switch { + // Give the DB a chance to dry run the migration. Since we know that + // both the channel state and graph DBs are still always behind the same + // backend, we know this would be applied to both of those DBs. + case err == channeldb.ErrDryRunMigrationOK: + ltndLog.Infof("Graph DB dry run migration successful") + return nil, nil, err - case err != nil: - err := fmt.Errorf("unable to open local channeldb: %v", err) - ltndLog.Error(err) - return nil, nil, nil, err - } + case err != nil: + cleanUp() - closeFuncs = append(closeFuncs, func() { - localChanDB.Close() - }) + err := fmt.Errorf("unable to open graph DB: %v", err) + ltndLog.Error(err) + return nil, nil, err + } - remoteChanDB = localChanDB - } else { - ltndLog.Infof("Database replication is available! 
Creating " + - "local and remote channeldb instances") - - // Otherwise, we'll open two instances, one for the state we - // only need locally, and the other for things we want to - // ensure are replicated. - localChanDB, err = channeldb.CreateWithBackend( - databaseBackends.LocalDB, - channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), - channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), - channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval), - channeldb.OptionDryRunMigration(cfg.DryRunMigration), + // For now, we don't _actually_ split the graph and channel state DBs on + // the code level. Since they both are based upon the *channeldb.DB + // struct it will require more refactoring to fully separate them. With + // the full remote mode we at least know for now that they both point to + // the same DB backend (and also namespace within that) so we only need + // to apply any migration once. + // + // TODO(guggero): Once the full separation of anything graph related + // from the channeldb.DB is complete, the decorated instance of the + // channel state DB should be created here individually instead of just + // using the same struct (and DB backend) instance. + dbs.chanStateDB = dbs.graphDB + + // Wrap the watchtower client DB and make sure we clean up. + if cfg.WtClient.Active { + dbs.towerClientDB, err = wtdb.OpenClientDB( + databaseBackends.TowerClientDB, ) - switch { - // As we want to allow both versions to get thru the dry run - // migration, we'll only exit the second time here once the - // remote instance has had a time to migrate as well. 
- case err == channeldb.ErrDryRunMigrationOK: - ltndLog.Infof("Local DB dry run migration successful") - - case err != nil: - err := fmt.Errorf("unable to open local channeldb: %v", err) + if err != nil { + cleanUp() + + err := fmt.Errorf("unable to open %s database: %v", + lncfg.NSTowerClientDB, err) ltndLog.Error(err) - return nil, nil, nil, err + return nil, nil, err } + } - closeFuncs = append(closeFuncs, func() { - localChanDB.Close() - }) - - ltndLog.Infof("Opening replicated database instance...") - - remoteChanDB, err = channeldb.CreateWithBackend( - databaseBackends.RemoteDB, - channeldb.OptionDryRunMigration(cfg.DryRunMigration), - channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval), + // Wrap the watchtower server DB and make sure we clean up. + if cfg.Watchtower.Active { + dbs.towerServerDB, err = wtdb.OpenTowerDB( + databaseBackends.TowerServerDB, ) - switch { - case err == channeldb.ErrDryRunMigrationOK: - return nil, nil, nil, err - - case err != nil: - localChanDB.Close() + if err != nil { + cleanUp() - err := fmt.Errorf("unable to open remote channeldb: %v", err) + err := fmt.Errorf("unable to open %s database: %v", + lncfg.NSTowerServerDB, err) ltndLog.Error(err) - return nil, nil, nil, err + return nil, nil, err } - - closeFuncs = append(closeFuncs, func() { - remoteChanDB.Close() - }) } openTime := time.Since(startOpenTime) - ltndLog.Infof("Database now open (time_to_open=%v)!", openTime) - - cleanUp := func() { - for _, closeFunc := range closeFuncs { - closeFunc() - } - } + ltndLog.Infof("Database(s) now open (time_to_open=%v)!", openTime) - return localChanDB, remoteChanDB, cleanUp, nil + return dbs, cleanUp, nil } // initNeutrinoBackend inits a new instance of the neutrino light client diff --git a/lnrpc/invoicesrpc/config_active.go b/lnrpc/invoicesrpc/config_active.go index ce008a5ffc8..3246f4b7f8f 100644 --- a/lnrpc/invoicesrpc/config_active.go +++ b/lnrpc/invoicesrpc/config_active.go @@ -44,13 +44,13 @@ type Config struct { 
// specified. DefaultCLTVExpiry uint32 - // LocalChanDB is a global boltdb instance which is needed to access the + // GraphDB is a global database instance which is needed to access the // channel graph. - LocalChanDB *channeldb.DB + GraphDB *channeldb.ChannelGraph - // RemoteChanDB is a replicatd db instance which is the same as the - // localdb when running without remote db. - RemoteChanDB *channeldb.DB + // ChanStateDB is a possibly replicated db instance which contains the + // channel state + ChanStateDB *channeldb.DB // GenInvoiceFeatures returns a feature containing feature bits that // should be advertised on freshly generated invoices. diff --git a/lnrpc/invoicesrpc/invoices_server.go b/lnrpc/invoicesrpc/invoices_server.go index 5b883310da4..947867afc41 100644 --- a/lnrpc/invoicesrpc/invoices_server.go +++ b/lnrpc/invoicesrpc/invoices_server.go @@ -316,8 +316,8 @@ func (s *Server) AddHoldInvoice(ctx context.Context, ChainParams: s.cfg.ChainParams, NodeSigner: s.cfg.NodeSigner, DefaultCLTVExpiry: s.cfg.DefaultCLTVExpiry, - ChanDB: s.cfg.RemoteChanDB, - Graph: s.cfg.LocalChanDB.ChannelGraph(), + ChanDB: s.cfg.ChanStateDB, + Graph: s.cfg.GraphDB, GenInvoiceFeatures: s.cfg.GenInvoiceFeatures, GenAmpInvoiceFeatures: s.cfg.GenAmpInvoiceFeatures, } diff --git a/lntest/itest/lnd_channel_force_close.go b/lntest/itest/lnd_channel_force_close.go index 9077a7c4829..4af4f7049e1 100644 --- a/lntest/itest/lnd_channel_force_close.go +++ b/lntest/itest/lnd_channel_force_close.go @@ -65,10 +65,7 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // transaction to CPFP our commitment transaction. 
feeRateLarge := maxPerKw * 2 - ctxt, cancel := context.WithTimeout( - context.Background(), defaultTimeout, - ) - defer cancel() + ctxb := context.Background() // Before we start, set up the default fee rate and we will test the // actual fee rate against it to decide whether we are using the @@ -76,25 +73,28 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, net.SetFeeEstimate(feeRateDefault) // setupNode creates a new node and sends 1 btc to the node. - setupNode := func(name string) *lntest.HarnessNode { + setupNode := func(ctx context.Context, name string) *lntest.HarnessNode { // Create the node. args := []string{"--hodl.exit-settle"} args = append(args, commitTypeAnchors.Args()...) node := net.NewNode(t.t, name, args) // Send some coins to the node. - net.SendCoins(ctxt, t.t, btcutil.SatoshiPerBitcoin, node) + net.SendCoins(ctx, t.t, btcutil.SatoshiPerBitcoin, node) return node } // calculateSweepFeeRate runs multiple steps to calculate the fee rate // used in sweeping the transactions. calculateSweepFeeRate := func(expectedSweepTxNum int) int64 { + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + // Create two nodes, Alice and Bob. - alice := setupNode("Alice") + alice := setupNode(ctxt, "Alice") defer shutdownAndAssert(net, t, alice) - bob := setupNode("Bob") + bob := setupNode(ctxt, "Bob") defer shutdownAndAssert(net, t, bob) // Connect Alice to Bob. @@ -102,8 +102,7 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // Open a channel between Alice and Bob. chanPoint := openChannelAndAssert( - ctxt, t, net, alice, bob, - lntest.OpenChannelParams{ + ctxt, t, net, alice, bob, lntest.OpenChannelParams{ Amt: 10e6, PushAmt: 5e6, }, @@ -113,8 +112,7 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, // be used as our deadline later on when Alice force closes the // channel. 
_, err := alice.RouterClient.SendPaymentV2( - ctxt, - &routerrpc.SendPaymentRequest{ + ctxt, &routerrpc.SendPaymentRequest{ Dest: bob.PubKey[:], Amt: 10e4, PaymentHash: makeFakePayHash(t), @@ -134,6 +132,8 @@ func testCommitmentTransactionDeadline(net *lntest.NetworkHarness, require.NoError(t.t, err, "htlc mismatch") // Alice force closes the channel. + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() _, _, err = net.CloseChannel(ctxt, alice, chanPoint, true) require.NoError(t.t, err, "unable to force close channel") diff --git a/lntest/itest/lnd_hold_invoice_force_test.go b/lntest/itest/lnd_hold_invoice_force_test.go index 7a71ac74400..62c2bccd8e2 100644 --- a/lntest/itest/lnd_hold_invoice_force_test.go +++ b/lntest/itest/lnd_hold_invoice_force_test.go @@ -25,8 +25,11 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) { Amt: 300000, } - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert(ctxt, t, net, net.Alice, net.Bob, chanReq) + ctxt, cancel := context.WithTimeout(ctxb, channelOpenTimeout) + defer cancel() + chanPoint := openChannelAndAssert( + ctxt, t, net, net.Alice, net.Bob, chanReq, + ) // Create a non-dust hold invoice for bob. 
var ( @@ -39,7 +42,8 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) { Hash: payHash[:], } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() bobInvoice, err := net.Bob.AddHoldInvoice(ctxt, invoiceReq) require.NoError(t.t, err) @@ -72,23 +76,30 @@ func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) { require.Len(t.t, chans.Channels[0].PendingHtlcs, 1) activeHtlc := chans.Channels[0].PendingHtlcs[0] + require.NoError(t.t, net.Alice.WaitForBlockchainSync(ctxb)) + require.NoError(t.t, net.Bob.WaitForBlockchainSync(ctxb)) + info, err := net.Alice.GetInfo(ctxb, &lnrpc.GetInfoRequest{}) require.NoError(t.t, err) // Now we will mine blocks until the htlc expires, and wait for each // node to sync to our latest height. Sanity check that we won't // underflow. - require.Greater(t.t, activeHtlc.ExpirationHeight, info.BlockHeight, - "expected expiry after current height") + require.Greater( + t.t, activeHtlc.ExpirationHeight, info.BlockHeight, + "expected expiry after current height", + ) blocksTillExpiry := activeHtlc.ExpirationHeight - info.BlockHeight // Alice will go to chain with some delta, sanity check that we won't // underflow and subtract this from our mined blocks. 
- require.Greater(t.t, blocksTillExpiry, - uint32(lncfg.DefaultOutgoingBroadcastDelta)) + require.Greater( + t.t, blocksTillExpiry, + uint32(lncfg.DefaultOutgoingBroadcastDelta), + ) blocksTillForce := blocksTillExpiry - lncfg.DefaultOutgoingBroadcastDelta - mineBlocks(t, net, blocksTillForce, 0) + mineBlocksSlow(t, net, blocksTillForce, 0) require.NoError(t.t, net.Alice.WaitForBlockchainSync(ctxb)) require.NoError(t.t, net.Bob.WaitForBlockchainSync(ctxb)) diff --git a/lntest/itest/lnd_misc_test.go b/lntest/itest/lnd_misc_test.go index aa7bf25dabb..3aa3fddf503 100644 --- a/lntest/itest/lnd_misc_test.go +++ b/lntest/itest/lnd_misc_test.go @@ -1906,6 +1906,7 @@ func testSweepAllCoins(net *lntest.NetworkHarness, t *harnessTest) { // Next, we try to relabel our transaction without setting the overwrite // boolean. We expect this to fail, because the wallet requires setting // of this param to prevent accidental overwrite of labels. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) _, err = ainz.WalletKitClient.LabelTransaction( ctxt, &walletrpc.LabelTransactionRequest{ Txid: sweepHash[:], diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index 2ec062547ef..20acfdea9a6 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -205,9 +205,9 @@ func TestLightningNetworkDaemon(t *testing.T) { // Run the subset of the test cases selected in this tranche. 
for idx, testCase := range testCases { testCase := testCase - name := fmt.Sprintf("%02d-of-%d/%s/%s", - trancheOffset+uint(idx)+1, len(allTestCases), - chainBackend.Name(), testCase.name) + name := fmt.Sprintf("tranche%02d/%02d-of-%d/%s/%s", + trancheIndex, trancheOffset+uint(idx)+1, + len(allTestCases), chainBackend.Name(), testCase.name) success := t.Run(name, func(t1 *testing.T) { cleanTestCaseName := strings.ReplaceAll( diff --git a/lntest/itest/test_harness.go b/lntest/itest/test_harness.go index 90a3c4ba60a..dc4585f28fb 100644 --- a/lntest/itest/test_harness.go +++ b/lntest/itest/test_harness.go @@ -20,6 +20,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/stretchr/testify/require" ) var ( @@ -29,6 +30,8 @@ var ( lndExecutable = flag.String( "lndexec", itestLndBinary, "full path to lnd binary", ) + + slowMineDelay = 50 * time.Millisecond ) const ( @@ -238,6 +241,54 @@ func mineBlocks(t *harnessTest, net *lntest.NetworkHarness, return blocks } +// mineBlocksSlow mines 'num' of blocks and checks that blocks are present in +// the mining node's blockchain. numTxs should be set to the number of +// transactions (excluding the coinbase) we expect to be included in the first +// mined block. Between each mined block an artificial delay is introduced to +// give all network participants time to catch up. +func mineBlocksSlow(t *harnessTest, net *lntest.NetworkHarness, + num uint32, numTxs int) []*wire.MsgBlock { + + t.t.Helper() + + // If we expect transactions to be included in the blocks we'll mine, + // we wait here until they are seen in the miner's mempool. 
+ var txids []*chainhash.Hash + var err error + if numTxs > 0 { + txids, err = waitForNTxsInMempool( + net.Miner.Client, numTxs, minerMempoolTimeout, + ) + require.NoError(t.t, err, "unable to find txns in mempool") + } + + blocks := make([]*wire.MsgBlock, num) + blockHashes := make([]*chainhash.Hash, 0, num) + + for i := uint32(0); i < num; i++ { + generatedHashes, err := net.Miner.Client.Generate(1) + require.NoError(t.t, err, "generate blocks") + blockHashes = append(blockHashes, generatedHashes...) + + time.Sleep(slowMineDelay) + } + + for i, blockHash := range blockHashes { + block, err := net.Miner.Client.GetBlock(blockHash) + require.NoError(t.t, err, "get blocks") + + blocks[i] = block + } + + // Finally, assert that all the transactions were included in the first + // block. + for _, txid := range txids { + assertTxInBlock(t, blocks[0], txid) + } + + return blocks +} + func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, txid *chainhash.Hash) { for _, tx := range block.Transactions { sha := tx.TxHash() diff --git a/lntest/node.go b/lntest/node.go index 27ccb15c46e..06359b0edb8 100644 --- a/lntest/node.go +++ b/lntest/node.go @@ -12,6 +12,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "sync" "sync/atomic" "time" @@ -1125,7 +1126,21 @@ func (hn *HarnessNode) stop() error { // closed before a response is returned. req := lnrpc.StopRequest{} ctx := context.Background() - _, err := hn.LightningClient.StopDaemon(ctx, &req) + + err := wait.NoError(func() error { + _, err := hn.LightningClient.StopDaemon(ctx, &req) + switch { + case err == nil: + return nil + + // Try again if a recovery/rescan is in progress. 
+ case strings.Contains(err.Error(), "recovery in progress"): + return err + + default: + return nil + } + }, DefaultTimeout) if err != nil { return err } diff --git a/lnwallet/test/test_interface.go b/lnwallet/test/test_interface.go index be2ab1d0c26..dd6bf1a9580 100644 --- a/lnwallet/test/test_interface.go +++ b/lnwallet/test/test_interface.go @@ -3201,7 +3201,7 @@ func TestLightningWallet(t *testing.T, targetBackEnd string) { testCfg := chainntnfs.CacheConfig{ QueryDisable: false, } - hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db) + hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db.Backend) if err != nil { t.Fatalf("unable to create height hint cache: %v", err) } diff --git a/macaroons/service.go b/macaroons/service.go index 69d9afa707e..808c238ed37 100644 --- a/macaroons/service.go +++ b/macaroons/service.go @@ -4,9 +4,6 @@ import ( "context" "encoding/hex" "fmt" - "os" - "path" - "time" "github.com/lightningnetwork/lnd/kvdb" "google.golang.org/grpc/metadata" @@ -17,10 +14,6 @@ import ( ) var ( - // DBFilename is the filename within the data directory which contains - // the macaroon stores. - DBFilename = "macaroons.db" - // ErrMissingRootKeyID specifies the root key ID is missing. ErrMissingRootKeyID = fmt.Errorf("missing root key ID") @@ -68,34 +61,17 @@ type Service struct { StatelessInit bool } -// NewService returns a service backed by the macaroon Bolt DB stored in the -// passed directory. The `checks` argument can be any of the `Checker` type -// functions defined in this package, or a custom checker if desired. This -// constructor prevents double-registration of checkers to prevent panics, so -// listing the same checker more than once is not harmful. Default checkers, -// such as those for `allow`, `time-before`, `declared`, and `error` caveats -// are registered automatically and don't need to be added. 
-func NewService(dir, location string, statelessInit bool, - dbTimeout time.Duration, checks ...Checker) (*Service, error) { - - // Ensure that the path to the directory exists. - if _, err := os.Stat(dir); os.IsNotExist(err) { - if err := os.MkdirAll(dir, 0700); err != nil { - return nil, err - } - } - - // Open the database that we'll use to store the primary macaroon key, - // and all generated macaroons+caveats. - macaroonDB, err := kvdb.Create( - kvdb.BoltBackendName, path.Join(dir, DBFilename), true, - dbTimeout, - ) - if err != nil { - return nil, err - } - - rootKeyStore, err := NewRootKeyStorage(macaroonDB) +// NewService returns a service backed by the macaroon DB backend. The `checks` +// argument can be any of the `Checker` type functions defined in this package, +// or a custom checker if desired. This constructor prevents double-registration +// of checkers to prevent panics, so listing the same checker more than once is +// not harmful. Default checkers, such as those for `allow`, `time-before`, +// `declared`, and `error` caveats are registered automatically and don't need +// to be added. +func NewService(db kvdb.Backend, location string, statelessInit bool, + checks ...Checker) (*Service, error) { + + rootKeyStore, err := NewRootKeyStorage(db) if err != nil { return nil, err } diff --git a/macaroons/service_test.go b/macaroons/service_test.go index b57a262a5cf..00b8c9bf60c 100644 --- a/macaroons/service_test.go +++ b/macaroons/service_test.go @@ -33,7 +33,7 @@ var ( // default password of 'hello'. Only the path to the temporary // DB file is returned, because the service will open the file // and read the store on its own. 
-func setupTestRootKeyStorage(t *testing.T) string { +func setupTestRootKeyStorage(t *testing.T) (string, kvdb.Backend) { tempDir, err := ioutil.TempDir("", "macaroonstore-") if err != nil { t.Fatalf("Error creating temp dir: %v", err) @@ -55,21 +55,20 @@ func setupTestRootKeyStorage(t *testing.T) string { if err != nil { t.Fatalf("error creating unlock: %v", err) } - return tempDir + return tempDir, db } // TestNewService tests the creation of the macaroon service. func TestNewService(t *testing.T) { // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. - tempDir := setupTestRootKeyStorage(t) + tempDir, db := setupTestRootKeyStorage(t) defer os.RemoveAll(tempDir) // Second, create the new service instance, unlock it and pass in a // checker that we expect it to add to the bakery. service, err := macaroons.NewService( - tempDir, "lnd", false, kvdb.DefaultDBTimeout, - macaroons.IPLockChecker, + db, "lnd", false, macaroons.IPLockChecker, ) if err != nil { t.Fatalf("Error creating new service: %v", err) @@ -117,11 +116,10 @@ func TestNewService(t *testing.T) { // incoming context. func TestValidateMacaroon(t *testing.T) { // First, initialize the service and unlock it. - tempDir := setupTestRootKeyStorage(t) + tempDir, db := setupTestRootKeyStorage(t) defer os.RemoveAll(tempDir) service, err := macaroons.NewService( - tempDir, "lnd", false, kvdb.DefaultDBTimeout, - macaroons.IPLockChecker, + db, "lnd", false, macaroons.IPLockChecker, ) if err != nil { t.Fatalf("Error creating new service: %v", err) @@ -175,14 +173,13 @@ func TestValidateMacaroon(t *testing.T) { func TestListMacaroonIDs(t *testing.T) { // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. 
- tempDir := setupTestRootKeyStorage(t) + tempDir, db := setupTestRootKeyStorage(t) defer os.RemoveAll(tempDir) // Second, create the new service instance, unlock it and pass in a // checker that we expect it to add to the bakery. service, err := macaroons.NewService( - tempDir, "lnd", false, kvdb.DefaultDBTimeout, - macaroons.IPLockChecker, + db, "lnd", false, macaroons.IPLockChecker, ) require.NoError(t, err, "Error creating new service") defer service.Close() @@ -208,14 +205,13 @@ func TestDeleteMacaroonID(t *testing.T) { // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. - tempDir := setupTestRootKeyStorage(t) + tempDir, db := setupTestRootKeyStorage(t) defer os.RemoveAll(tempDir) // Second, create the new service instance, unlock it and pass in a // checker that we expect it to add to the bakery. service, err := macaroons.NewService( - tempDir, "lnd", false, kvdb.DefaultDBTimeout, - macaroons.IPLockChecker, + db, "lnd", false, macaroons.IPLockChecker, ) require.NoError(t, err, "Error creating new service") defer service.Close() diff --git a/macaroons/store.go b/macaroons/store.go index 80aa7ced1ca..7b0c3364376 100644 --- a/macaroons/store.go +++ b/macaroons/store.go @@ -66,7 +66,6 @@ type RootKeyStorage struct { } // NewRootKeyStorage creates a RootKeyStorage instance. -// TODO(aakselrod): Add support for encryption of data with passphrase. func NewRootKeyStorage(db kvdb.Backend) (*RootKeyStorage, error) { // If the store's bucket doesn't exist, create it. err := kvdb.Update(db, func(tx kvdb.RwTx) error { @@ -78,7 +77,10 @@ func NewRootKeyStorage(db kvdb.Backend) (*RootKeyStorage, error) { } // Return the DB wrapped in a RootKeyStorage object. 
- return &RootKeyStorage{Backend: db, encKey: nil}, nil + return &RootKeyStorage{ + Backend: db, + encKey: nil, + }, nil } // CreateUnlock sets an encryption key if one is not already set, otherwise it @@ -97,7 +99,7 @@ func (r *RootKeyStorage) CreateUnlock(password *[]byte) error { return ErrPasswordRequired } - return kvdb.Update(r, func(tx kvdb.RwTx) error { + return kvdb.Update(r.Backend, func(tx kvdb.RwTx) error { bucket := tx.ReadWriteBucket(rootKeyBucketName) if bucket == nil { return ErrRootKeyBucketNotFound @@ -153,7 +155,7 @@ func (r *RootKeyStorage) ChangePassword(oldPw, newPw []byte) error { return ErrPasswordRequired } - return kvdb.Update(r, func(tx kvdb.RwTx) error { + return kvdb.Update(r.Backend, func(tx kvdb.RwTx) error { bucket := tx.ReadWriteBucket(rootKeyBucketName) if bucket == nil { return ErrRootKeyBucketNotFound @@ -225,7 +227,7 @@ func (r *RootKeyStorage) Get(_ context.Context, id []byte) ([]byte, error) { return nil, ErrStoreLocked } var rootKey []byte - err := kvdb.View(r, func(tx kvdb.RTx) error { + err := kvdb.View(r.Backend, func(tx kvdb.RTx) error { bucket := tx.ReadBucket(rootKeyBucketName) if bucket == nil { return ErrRootKeyBucketNotFound @@ -276,7 +278,7 @@ func (r *RootKeyStorage) RootKey(ctx context.Context) ([]byte, []byte, error) { return nil, nil, ErrKeyValueForbidden } - err = kvdb.Update(r, func(tx kvdb.RwTx) error { + err = kvdb.Update(r.Backend, func(tx kvdb.RwTx) error { bucket := tx.ReadWriteBucket(rootKeyBucketName) if bucket == nil { return ErrRootKeyBucketNotFound @@ -319,7 +321,7 @@ func (r *RootKeyStorage) GenerateNewRootKey() error { if r.encKey == nil { return ErrStoreLocked } - return kvdb.Update(r, func(tx kvdb.RwTx) error { + return kvdb.Update(r.Backend, func(tx kvdb.RwTx) error { bucket := tx.ReadWriteBucket(rootKeyBucketName) if bucket == nil { return ErrRootKeyBucketNotFound @@ -341,7 +343,13 @@ func (r *RootKeyStorage) Close() error { r.encKey.Zero() r.encKey = nil } - return r.Backend.Close() + + // 
Since we're not responsible for _creating_ the connection to our DB + // backend, we also shouldn't close it. This should be handled + // externally as to not interfere with remote DB connections in case we + // need to open/close the store twice as happens in the password change + // case. + return nil } // generateAndStoreNewRootKey creates a new random RootKeyLen-byte root key, @@ -377,7 +385,7 @@ func (r *RootKeyStorage) ListMacaroonIDs(_ context.Context) ([][]byte, error) { // Read all the items in the bucket and append the keys, which are the // root key IDs we want. - err := kvdb.View(r, func(tx kvdb.RTx) error { + err := kvdb.View(r.Backend, func(tx kvdb.RTx) error { // appendRootKey is a function closure that appends root key ID // to rootKeySlice. @@ -426,7 +434,7 @@ func (r *RootKeyStorage) DeleteMacaroonID( } var rootKeyIDDeleted []byte - err := kvdb.Update(r, func(tx kvdb.RwTx) error { + err := kvdb.Update(r.Backend, func(tx kvdb.RwTx) error { bucket := tx.ReadWriteBucket(rootKeyBucketName) // Check the key can be found. If not, return nil. diff --git a/macaroons/store_test.go b/macaroons/store_test.go index 517315c6301..8dfe9e3a83c 100644 --- a/macaroons/store_test.go +++ b/macaroons/store_test.go @@ -54,6 +54,7 @@ func openTestStore(t *testing.T, tempDir string) (func(), cleanup := func() { _ = store.Close() + _ = db.Close() } return cleanup, store @@ -108,6 +109,7 @@ func TestStore(t *testing.T) { require.Equal(t, macaroons.ErrAlreadyUnlocked, err) _ = store.Close() + _ = store.Backend.Close() // Between here and the re-opening of the store, it's possible to get // a double-close, but that's not such a big deal since the tests will @@ -206,6 +208,8 @@ func TestStoreChangePassword(t *testing.T) { // after closing. 
err = store.Close() require.NoError(t, err) + err = store.Backend.Close() + require.NoError(t, err) err = store.CreateUnlock(&newPw) require.Error(t, err) diff --git a/make/testing_flags.mk b/make/testing_flags.mk index 1cfea551787..e75f3d6ce36 100644 --- a/make/testing_flags.mk +++ b/make/testing_flags.mk @@ -46,7 +46,7 @@ endif # Define the integration test.run filter if the icase argument was provided. ifneq ($(icase),) -TEST_FLAGS += -test.run="TestLightningNetworkDaemon/.*-of-.*/.*/$(icase)" +TEST_FLAGS += -test.run="TestLightningNetworkDaemon/tranche.*/.*-of-.*/.*/$(icase)" endif # Run itests with specified db backend. diff --git a/pilot.go b/pilot.go index 683cabfe136..13f500f561c 100644 --- a/pilot.go +++ b/pilot.go @@ -185,7 +185,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot, cfg.MinConfs, lnwallet.DefaultAccountName, ) }, - Graph: autopilot.ChannelGraphFromDatabase(svr.localChanDB.ChannelGraph()), + Graph: autopilot.ChannelGraphFromDatabase(svr.graphDB), Constraints: atplConstraints, ConnectToPeer: func(target *btcec.PublicKey, addrs []net.Addr) (bool, error) { // First, we'll check if we're already connected to the @@ -258,7 +258,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot, // We'll fetch the current state of open // channels from the database to use as initial // state for the auto-pilot agent. 
- activeChannels, err := svr.remoteChanDB.FetchAllChannels() + activeChannels, err := svr.chanStateDB.FetchAllChannels() if err != nil { return nil, err } @@ -282,7 +282,7 @@ func initAutoPilot(svr *server, cfg *lncfg.AutoPilot, ChannelInfo: func(chanPoint wire.OutPoint) ( *autopilot.LocalChannel, error) { - channel, err := svr.remoteChanDB.FetchChannel(chanPoint) + channel, err := svr.chanStateDB.FetchChannel(chanPoint) if err != nil { return nil, err } diff --git a/rpcserver.go b/rpcserver.go index 8ff36d964b4..e41a05e8df5 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -614,12 +614,11 @@ func (r *rpcServer) addDeps(s *server, macService *macaroons.Service, chanPredicate *chanacceptor.ChainedAcceptor) error { // Set up router rpc backend. - channelGraph := s.localChanDB.ChannelGraph() - selfNode, err := channelGraph.SourceNode() + selfNode, err := s.graphDB.SourceNode() if err != nil { return err } - graph := s.localChanDB.ChannelGraph() + graph := s.graphDB routerBackend := &routerrpc.RouterBackend{ SelfNode: selfNode.PubKeyBytes, FetchChannelCapacity: func(chanID uint64) (btcutil.Amount, @@ -683,7 +682,7 @@ func (r *rpcServer) addDeps(s *server, macService *macaroons.Service, err = subServerCgs.PopulateDependencies( r.cfg, s.cc, r.cfg.networkDir, macService, atpl, invoiceRegistry, s.htlcSwitch, r.cfg.ActiveNetParams.Params, s.chanRouter, - routerBackend, s.nodeSigner, s.localChanDB, s.remoteChanDB, + routerBackend, s.nodeSigner, s.graphDB, s.chanStateDB, s.sweeper, tower, s.towerClient, s.anchorTowerClient, r.cfg.net.ResolveTCPAddr, genInvoiceFeatures, genAmpInvoiceFeatures, rpcsLog, @@ -1509,7 +1508,7 @@ func (r *rpcServer) VerifyMessage(ctx context.Context, // channels signed the message. // // TODO(phlip9): Require valid nodes to have capital in active channels. 
- graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB _, active, err := graph.HasLightningNode(pub) if err != nil { return nil, fmt.Errorf("failed to query graph: %v", err) @@ -1615,7 +1614,7 @@ func (r *rpcServer) DisconnectPeer(ctx context.Context, // Next, we'll fetch the pending/active channels we have with a // particular peer. - nodeChannels, err := r.server.remoteChanDB.FetchOpenChannels(peerPubKey) + nodeChannels, err := r.server.chanStateDB.FetchOpenChannels(peerPubKey) if err != nil { return nil, fmt.Errorf("unable to fetch channels for peer: %v", err) } @@ -2125,7 +2124,7 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, // First, we'll fetch the channel as is, as we'll need to examine it // regardless of if this is a force close or not. - channel, err := r.server.remoteChanDB.FetchChannel(*chanPoint) + channel, err := r.server.chanStateDB.FetchChannel(*chanPoint) if err != nil { return err } @@ -2403,7 +2402,7 @@ func (r *rpcServer) AbandonChannel(_ context.Context, return nil, err } - dbChan, err := r.server.remoteChanDB.FetchChannel(*chanPoint) + dbChan, err := r.server.chanStateDB.FetchChannel(*chanPoint) switch { // If the channel isn't found in the set of open channels, then we can // continue on as it can't be loaded into the link/peer. @@ -2450,13 +2449,11 @@ func (r *rpcServer) AbandonChannel(_ context.Context, // court. Between any step it's possible that the users restarts the // process all over again. As a result, each of the steps below are // intended to be idempotent. 
- err = r.server.remoteChanDB.AbandonChannel(chanPoint, uint32(bestHeight)) + err = r.server.chanStateDB.AbandonChannel(chanPoint, uint32(bestHeight)) if err != nil { return nil, err } - err = abandonChanFromGraph( - r.server.localChanDB.ChannelGraph(), chanPoint, - ) + err = abandonChanFromGraph(r.server.graphDB, chanPoint) if err != nil { return nil, err } @@ -2489,7 +2486,7 @@ func (r *rpcServer) GetInfo(_ context.Context, serverPeers := r.server.Peers() - openChannels, err := r.server.remoteChanDB.FetchAllOpenChannels() + openChannels, err := r.server.chanStateDB.FetchAllOpenChannels() if err != nil { return nil, err } @@ -2504,7 +2501,7 @@ func (r *rpcServer) GetInfo(_ context.Context, inactiveChannels := uint32(len(openChannels)) - activeChannels - pendingChannels, err := r.server.remoteChanDB.FetchPendingChannels() + pendingChannels, err := r.server.chanStateDB.FetchPendingChannels() if err != nil { return nil, fmt.Errorf("unable to get retrieve pending "+ "channels: %v", err) @@ -2905,7 +2902,7 @@ func (r *rpcServer) ChannelBalance(ctx context.Context, pendingOpenRemoteBalance lnwire.MilliSatoshi ) - openChannels, err := r.server.remoteChanDB.FetchAllOpenChannels() + openChannels, err := r.server.chanStateDB.FetchAllOpenChannels() if err != nil { return nil, err } @@ -2925,7 +2922,7 @@ func (r *rpcServer) ChannelBalance(ctx context.Context, } } - pendingChannels, err := r.server.remoteChanDB.FetchPendingChannels() + pendingChannels, err := r.server.chanStateDB.FetchPendingChannels() if err != nil { return nil, err } @@ -2999,7 +2996,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context, // First, we'll populate the response with all the channels that are // soon to be opened. We can easily fetch this data from the database // and map the db struct to the proto response. 
- pendingOpenChannels, err := r.server.remoteChanDB.FetchPendingChannels() + pendingOpenChannels, err := r.server.chanStateDB.FetchPendingChannels() if err != nil { rpcsLog.Errorf("unable to fetch pending channels: %v", err) return nil, err @@ -3047,7 +3044,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context, // Next, we'll examine the channels that are soon to be closed so we // can populate these fields within the response. - pendingCloseChannels, err := r.server.remoteChanDB.FetchClosedChannels(true) + pendingCloseChannels, err := r.server.chanStateDB.FetchClosedChannels(true) if err != nil { rpcsLog.Errorf("unable to fetch closed channels: %v", err) return nil, err @@ -3076,7 +3073,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context, // not found, or the channel itself, this channel was closed // in a version before we started persisting historical // channels, so we silence the error. - historical, err := r.server.remoteChanDB.FetchHistoricalChannel( + historical, err := r.server.chanStateDB.FetchHistoricalChannel( &pendingClose.ChanPoint, ) switch err { @@ -3151,7 +3148,7 @@ func (r *rpcServer) PendingChannels(ctx context.Context, // We'll also fetch all channels that are open, but have had their // commitment broadcasted, meaning they are waiting for the closing // transaction to confirm. 
- waitingCloseChans, err := r.server.remoteChanDB.FetchWaitingCloseChannels() + waitingCloseChans, err := r.server.chanStateDB.FetchWaitingCloseChannels() if err != nil { rpcsLog.Errorf("unable to fetch channels waiting close: %v", err) @@ -3386,7 +3383,7 @@ func (r *rpcServer) ClosedChannels(ctx context.Context, resp := &lnrpc.ClosedChannelsResponse{} - dbChannels, err := r.server.remoteChanDB.FetchClosedChannels(false) + dbChannels, err := r.server.chanStateDB.FetchClosedChannels(false) if err != nil { return nil, err } @@ -3463,9 +3460,9 @@ func (r *rpcServer) ListChannels(ctx context.Context, resp := &lnrpc.ListChannelsResponse{} - graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB - dbChannels, err := r.server.remoteChanDB.FetchAllOpenChannels() + dbChannels, err := r.server.chanStateDB.FetchAllOpenChannels() if err != nil { return nil, err } @@ -3816,7 +3813,7 @@ func (r *rpcServer) createRPCClosedChannel( CloseInitiator: closeInitiator, } - reports, err := r.server.remoteChanDB.FetchChannelReports( + reports, err := r.server.chanStateDB.FetchChannelReports( *r.cfg.ActiveNetParams.GenesisHash, &dbChannel.ChanPoint, ) switch err { @@ -3921,7 +3918,7 @@ func (r *rpcServer) getInitiators(chanPoint *wire.OutPoint) ( // To get the close initiator for cooperative closes, we need // to get the channel status from the historical channel bucket. - histChan, err := r.server.remoteChanDB.FetchHistoricalChannel(chanPoint) + histChan, err := r.server.chanStateDB.FetchHistoricalChannel(chanPoint) switch { // The node has upgraded from a version where we did not store // historical channels, and has not closed a channel since. Do @@ -3985,7 +3982,7 @@ func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription, // the server, or client exits. 
defer channelEventSub.Cancel() - graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB for { select { @@ -4823,8 +4820,8 @@ func (r *rpcServer) AddInvoice(ctx context.Context, ChainParams: r.cfg.ActiveNetParams.Params, NodeSigner: r.server.nodeSigner, DefaultCLTVExpiry: defaultDelta, - ChanDB: r.server.remoteChanDB, - Graph: r.server.localChanDB.ChannelGraph(), + ChanDB: r.server.chanStateDB, + Graph: r.server.graphDB, GenInvoiceFeatures: func() *lnwire.FeatureVector { return r.server.featureMgr.Get(feature.SetInvoice) }, @@ -4949,7 +4946,7 @@ func (r *rpcServer) ListInvoices(ctx context.Context, PendingOnly: req.PendingOnly, Reversed: req.Reversed, } - invoiceSlice, err := r.server.remoteChanDB.QueryInvoices(q) + invoiceSlice, err := r.server.chanStateDB.QueryInvoices(q) if err != nil { return nil, fmt.Errorf("unable to query invoices: %v", err) } @@ -5124,7 +5121,7 @@ func (r *rpcServer) DescribeGraph(ctx context.Context, // Obtain the pointer to the global singleton channel graph, this will // provide a consistent view of the graph due to bolt db's // transactional model. - graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB // First iterate through all the known nodes (connected or unconnected // within the graph), collating their current state into the RPC @@ -5263,7 +5260,7 @@ func (r *rpcServer) GetNodeMetrics(ctx context.Context, // Obtain the pointer to the global singleton channel graph, this will // provide a consistent view of the graph due to bolt db's // transactional model. - graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB // Calculate betweenness centrality if requested. Note that depending on the // graph size, this may take up to a few minutes. 
@@ -5302,7 +5299,7 @@ func (r *rpcServer) GetNodeMetrics(ctx context.Context, func (r *rpcServer) GetChanInfo(ctx context.Context, in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) { - graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB edgeInfo, edge1, edge2, err := graph.FetchChannelEdgesByID(in.ChanId) if err != nil { @@ -5322,7 +5319,7 @@ func (r *rpcServer) GetChanInfo(ctx context.Context, func (r *rpcServer) GetNodeInfo(ctx context.Context, in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) { - graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB // First, parse the hex-encoded public key into a full in-memory public // key object we can work with for querying. @@ -5423,7 +5420,7 @@ func (r *rpcServer) QueryRoutes(ctx context.Context, func (r *rpcServer) GetNetworkInfo(ctx context.Context, _ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) { - graph := r.server.localChanDB.ChannelGraph() + graph := r.server.graphDB var ( numNodes uint32 @@ -5735,7 +5732,7 @@ func (r *rpcServer) ListPayments(ctx context.Context, query.MaxPayments = math.MaxUint64 } - paymentsQuerySlice, err := r.server.remoteChanDB.QueryPayments(query) + paymentsQuerySlice, err := r.server.chanStateDB.QueryPayments(query) if err != nil { return nil, err } @@ -5770,7 +5767,7 @@ func (r *rpcServer) DeleteAllPayments(ctx context.Context, "failed_htlcs_only=%v", req.FailedPaymentsOnly, req.FailedHtlcsOnly) - err := r.server.remoteChanDB.DeletePayments( + err := r.server.chanStateDB.DeletePayments( req.FailedPaymentsOnly, req.FailedHtlcsOnly, ) if err != nil { @@ -5893,7 +5890,7 @@ func (r *rpcServer) FeeReport(ctx context.Context, rpcsLog.Debugf("[feereport]") - channelGraph := r.server.localChanDB.ChannelGraph() + channelGraph := r.server.graphDB selfNode, err := channelGraph.SourceNode() if err != nil { return nil, err @@ -5932,7 +5929,7 @@ func (r *rpcServer) FeeReport(ctx context.Context, return nil, err } - fwdEventLog := 
r.server.remoteChanDB.ForwardingLog() + fwdEventLog := r.server.chanStateDB.ForwardingLog() // computeFeeSum is a helper function that computes the total fees for // a particular time slice described by a forwarding event query. @@ -6170,7 +6167,7 @@ func (r *rpcServer) ForwardingHistory(ctx context.Context, IndexOffset: req.IndexOffset, NumMaxEvents: numEvents, } - timeSlice, err := r.server.remoteChanDB.ForwardingLog().Query(eventQuery) + timeSlice, err := r.server.chanStateDB.ForwardingLog().Query(eventQuery) if err != nil { return nil, fmt.Errorf("unable to query forwarding log: %v", err) } @@ -6232,7 +6229,7 @@ func (r *rpcServer) ExportChannelBackup(ctx context.Context, // the database. If this channel has been closed, or the outpoint is // unknown, then we'll return an error unpackedBackup, err := chanbackup.FetchBackupForChan( - chanPoint, r.server.remoteChanDB, + chanPoint, r.server.chanStateDB, ) if err != nil { return nil, err @@ -6402,7 +6399,7 @@ func (r *rpcServer) ExportAllChannelBackups(ctx context.Context, // First, we'll attempt to read back ups for ALL currently opened // channels from disk. allUnpackedBackups, err := chanbackup.FetchStaticChanBackups( - r.server.remoteChanDB, + r.server.chanStateDB, ) if err != nil { return nil, fmt.Errorf("unable to fetch all static chan "+ @@ -6425,7 +6422,7 @@ func (r *rpcServer) RestoreChannelBackups(ctx context.Context, // restore either a set of chanbackup.Single or chanbackup.Multi // backups. chanRestorer := &chanDBRestorer{ - db: r.server.remoteChanDB, + db: r.server.chanStateDB, secretKeys: r.server.cc.KeyRing, chainArb: r.server.chainArb, } @@ -6523,7 +6520,7 @@ func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription // we'll obtains the current set of single channel // backups from disk. 
chanBackups, err := chanbackup.FetchStaticChanBackups( - r.server.remoteChanDB, + r.server.chanStateDB, ) if err != nil { return fmt.Errorf("unable to fetch all "+ diff --git a/server.go b/server.go index 2b914b90a4c..b21c71a47c9 100644 --- a/server.go +++ b/server.go @@ -220,9 +220,9 @@ type server struct { fundingMgr *funding.Manager - localChanDB *channeldb.DB + graphDB *channeldb.ChannelGraph - remoteChanDB *channeldb.DB + chanStateDB *channeldb.DB htlcSwitch *htlcswitch.Switch @@ -352,8 +352,7 @@ func noiseDial(idKey keychain.SingleKeyECDH, // newServer creates a new instance of the server which is to listen using the // passed listener address. func newServer(cfg *Config, listenAddrs []net.Addr, - localChanDB, remoteChanDB *channeldb.DB, - towerClientDB wtclient.DB, cc *chainreg.ChainControl, + dbs *databaseInstances, cc *chainreg.ChainControl, nodeKeyDesc *keychain.KeyDescriptor, chansToRestore walletunlocker.ChannelsToRecover, chanPredicate chanacceptor.ChannelAcceptor, @@ -383,12 +382,9 @@ func newServer(cfg *Config, listenAddrs []net.Addr, var serializedPubKey [33]byte copy(serializedPubKey[:], nodeKeyECDH.PubKey().SerializeCompressed()) - // Initialize the sphinx router, placing it's persistent replay log in - // the same directory as the channel graph database. We don't need to - // replicate this data, so we'll store it locally. + // Initialize the sphinx router. 
replayLog := htlcswitch.NewDecayedLog( - cfg.localDatabaseDir(), defaultSphinxDbName, cfg.DB.Bolt, - cc.ChainNotifier, + dbs.decayedLogDB, cc.ChainNotifier, ) sphinxRouter := sphinx.NewRouter( nodeKeyECDH, cfg.ActiveNetParams.Params, replayLog, @@ -435,15 +431,15 @@ func newServer(cfg *Config, listenAddrs []net.Addr, s := &server{ cfg: cfg, - localChanDB: localChanDB, - remoteChanDB: remoteChanDB, + graphDB: dbs.graphDB.ChannelGraph(), + chanStateDB: dbs.chanStateDB, cc: cc, sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.Signer), writePool: writePool, readPool: readPool, chansToRestore: chansToRestore, - channelNotifier: channelnotifier.New(remoteChanDB), + channelNotifier: channelnotifier.New(dbs.chanStateDB), identityECDH: nodeKeyECDH, nodeSigner: netann.NewNodeSigner(nodeKeySigner), @@ -475,7 +471,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, } s.witnessBeacon = &preimageBeacon{ - wCache: remoteChanDB.NewWitnessCache(), + wCache: dbs.chanStateDB.NewWitnessCache(), subscribers: make(map[uint64]*preimageSubscriber), } @@ -489,13 +485,13 @@ func newServer(cfg *Config, listenAddrs []net.Addr, uint32(currentHeight), currentHash, cc.ChainNotifier, ) s.invoices = invoices.NewRegistry( - remoteChanDB, expiryWatcher, ®istryConfig, + dbs.chanStateDB, expiryWatcher, ®istryConfig, ) s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now) s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{ - DB: remoteChanDB, + DB: dbs.chanStateDB, LocalChannelClose: func(pubKey []byte, request *htlcswitch.ChanClose) { @@ -510,7 +506,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, peer.HandleLocalCloseChanReqs(request) }, - FwdingLog: remoteChanDB.ForwardingLog(), + FwdingLog: dbs.chanStateDB.ForwardingLog(), SwitchPackager: channeldb.NewSwitchPackager(), ExtractErrorEncrypter: s.sphinx.ExtractErrorEncrypter, FetchLastChannelUpdate: s.fetchLastChanUpdate(), @@ -537,8 +533,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr, MessageSigner: s.nodeSigner, 
IsChannelActive: s.htlcSwitch.HasActiveLink, ApplyChannelUpdate: s.applyChannelUpdate, - DB: remoteChanDB, - Graph: localChanDB.ChannelGraph(), + DB: dbs.chanStateDB, + Graph: dbs.graphDB.ChannelGraph(), } chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg) @@ -630,7 +626,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, // As the graph can be obtained at anytime from the network, we won't // replicate it, and instead it'll only be stored locally. - chanGraph := localChanDB.ChannelGraph() + chanGraph := dbs.graphDB.ChannelGraph() // We'll now reconstruct a node announcement based on our current // configuration so we can send it out as a sort of heart beat within @@ -697,7 +693,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, // The router will get access to the payment ID sequencer, such that it // can generate unique payment IDs. - sequencer, err := htlcswitch.NewPersistentSequencer(remoteChanDB) + sequencer, err := htlcswitch.NewPersistentSequencer(dbs.chanStateDB) if err != nil { return nil, err } @@ -742,7 +738,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, } s.missionControl, err = routing.NewMissionControl( - remoteChanDB, selfNode.PubKeyBytes, + dbs.chanStateDB, selfNode.PubKeyBytes, &routing.MissionControlConfig{ ProbabilityEstimatorCfg: estimatorCfg, MaxMcHistory: routingConfig.MaxMcHistory, @@ -775,7 +771,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, PathFindingConfig: pathFindingConfig, } - paymentControl := channeldb.NewPaymentControl(remoteChanDB) + paymentControl := channeldb.NewPaymentControl(dbs.chanStateDB) s.controlTower = routing.NewControlTower(paymentControl) @@ -803,12 +799,12 @@ func newServer(cfg *Config, listenAddrs []net.Addr, return nil, fmt.Errorf("can't create router: %v", err) } - chanSeries := discovery.NewChanSeries(s.localChanDB.ChannelGraph()) - gossipMessageStore, err := discovery.NewMessageStore(s.remoteChanDB) + chanSeries := discovery.NewChanSeries(s.graphDB) + 
gossipMessageStore, err := discovery.NewMessageStore(s.chanStateDB) if err != nil { return nil, err } - waitingProofStore, err := channeldb.NewWaitingProofStore(s.remoteChanDB) + waitingProofStore, err := channeldb.NewWaitingProofStore(s.chanStateDB) if err != nil { return nil, err } @@ -848,10 +844,12 @@ func newServer(cfg *Config, listenAddrs []net.Addr, ForAllOutgoingChannels: s.chanRouter.ForAllOutgoingChannels, PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate, UpdateForwardingPolicies: s.htlcSwitch.UpdateForwardingPolicies, - FetchChannel: s.remoteChanDB.FetchChannel, + FetchChannel: s.chanStateDB.FetchChannel, } - utxnStore, err := newNurseryStore(s.cfg.ActiveNetParams.GenesisHash, remoteChanDB) + utxnStore, err := newNurseryStore( + s.cfg.ActiveNetParams.GenesisHash, dbs.chanStateDB, + ) if err != nil { srvrLog.Errorf("unable to create nursery store: %v", err) return nil, err @@ -861,7 +859,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, sweep.DefaultBatchWindowDuration) sweeperStore, err := sweep.NewSweeperStore( - remoteChanDB, s.cfg.ActiveNetParams.GenesisHash, + dbs.chanStateDB, s.cfg.ActiveNetParams.GenesisHash, ) if err != nil { srvrLog.Errorf("unable to create sweeper store: %v", err) @@ -888,8 +886,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr, s.utxoNursery = newUtxoNursery(&NurseryConfig{ ChainIO: cc.ChainIO, ConfDepth: 1, - FetchClosedChannels: remoteChanDB.FetchClosedChannels, - FetchClosedChannel: remoteChanDB.FetchClosedChannel, + FetchClosedChannels: dbs.chanStateDB.FetchClosedChannels, + FetchClosedChannel: dbs.chanStateDB.FetchClosedChannel, Notifier: cc.ChainNotifier, PublishTransaction: cc.Wallet.PublishTransaction, Store: utxnStore, @@ -1010,18 +1008,18 @@ func newServer(cfg *Config, listenAddrs []net.Addr, PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod, IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC, Clock: clock.NewDefaultClock(), - }, remoteChanDB) + }, dbs.chanStateDB) 
s.breachArbiter = newBreachArbiter(&BreachConfig{ CloseLink: closeLink, - DB: remoteChanDB, + DB: dbs.chanStateDB, Estimator: s.cc.FeeEstimator, GenSweepScript: newSweepPkScriptGen(cc.Wallet), Notifier: cc.ChainNotifier, PublishTransaction: cc.Wallet.PublishTransaction, ContractBreaches: contractBreaches, Signer: cc.Wallet.Cfg.Signer, - Store: newRetributionStore(remoteChanDB), + Store: newRetributionStore(dbs.chanStateDB), }) // Select the configuration and furnding parameters for Bitcoin or @@ -1069,7 +1067,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, FindChannel: func(chanID lnwire.ChannelID) ( *channeldb.OpenChannel, error) { - dbChannels, err := remoteChanDB.FetchAllChannels() + dbChannels, err := dbs.chanStateDB.FetchAllChannels() if err != nil { return nil, err } @@ -1241,10 +1239,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr, // static backup of the latest channel state. chanNotifier := &channelNotifier{ chanNotifier: s.channelNotifier, - addrs: s.remoteChanDB, + addrs: s.chanStateDB, } backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath) - startingChans, err := chanbackup.FetchStaticChanBackups(s.remoteChanDB) + startingChans, err := chanbackup.FetchStaticChanBackups(s.chanStateDB) if err != nil { return nil, err } @@ -1267,10 +1265,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr, SubscribePeerEvents: func() (subscribe.Subscription, error) { return s.peerNotifier.SubscribePeerEvents() }, - GetOpenChannels: s.remoteChanDB.FetchAllOpenChannels, + GetOpenChannels: s.chanStateDB.FetchAllOpenChannels, Clock: clock.NewDefaultClock(), - ReadFlapCount: s.remoteChanDB.ReadFlapCount, - WriteFlapCount: s.remoteChanDB.WriteFlapCounts, + ReadFlapCount: s.chanStateDB.ReadFlapCount, + WriteFlapCount: s.chanStateDB.WriteFlapCounts, FlapCountTicker: ticker.New(chanfitness.FlapCountFlushRate), }) @@ -1307,7 +1305,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, SecretKeyRing: s.cc.KeyRing, Dial: cfg.net.Dial, AuthDial: authDial, 
- DB: towerClientDB, + DB: dbs.towerClientDB, Policy: policy, ChainHash: *s.cfg.ActiveNetParams.GenesisHash, MinBackoff: 10 * time.Second, @@ -1330,7 +1328,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, SecretKeyRing: s.cc.KeyRing, Dial: cfg.net.Dial, AuthDial: authDial, - DB: towerClientDB, + DB: dbs.towerClientDB, Policy: anchorPolicy, ChainHash: *s.cfg.ActiveNetParams.GenesisHash, MinBackoff: 10 * time.Second, @@ -1675,7 +1673,7 @@ func (s *server) Start() error { // that have all the information we need to handle channel // recovery _before_ we even accept connections from any peers. chanRestorer := &chanDBRestorer{ - db: s.remoteChanDB, + db: s.chanStateDB, secretKeys: s.cc.KeyRing, chainArb: s.chainArb, } @@ -1720,7 +1718,7 @@ func (s *server) Start() error { // we'll prune our set of link nodes found within the database // to ensure we don't reconnect to any nodes we no longer have // open channels with. - if err := s.remoteChanDB.PruneLinkNodes(); err != nil { + if err := s.chanStateDB.PruneLinkNodes(); err != nil { startErr = err return } @@ -2130,7 +2128,7 @@ func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, e // First, we'll create an instance of the ChannelGraphBootstrapper as // this can be used by default if we've already partially seeded the // network. 
- chanGraph := autopilot.ChannelGraphFromDatabase(s.localChanDB.ChannelGraph()) + chanGraph := autopilot.ChannelGraphFromDatabase(s.graphDB) graphBootstrapper, err := discovery.NewGraphBootstrapper(chanGraph) if err != nil { return nil, err @@ -2467,7 +2465,7 @@ func (s *server) createNewHiddenService() error { AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(), } copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed()) - if err := s.localChanDB.ChannelGraph().SetSourceNode(selfNode); err != nil { + if err := s.graphDB.SetSourceNode(selfNode); err != nil { return fmt.Errorf("can't set self node: %v", err) } @@ -2524,7 +2522,7 @@ func (s *server) establishPersistentConnections() error { // Iterate through the list of LinkNodes to find addresses we should // attempt to connect to based on our set of previous connections. Set // the reconnection port to the default peer port. - linkNodes, err := s.remoteChanDB.FetchAllLinkNodes() + linkNodes, err := s.chanStateDB.FetchAllLinkNodes() if err != nil && err != channeldb.ErrLinkNodesNotFound { return err } @@ -2540,8 +2538,7 @@ func (s *server) establishPersistentConnections() error { // After checking our previous connections for addresses to connect to, // iterate through the nodes in our channel graph to find addresses // that have been added via NodeAnnouncement messages. 
- chanGraph := s.localChanDB.ChannelGraph() - sourceNode, err := chanGraph.SourceNode() + sourceNode, err := s.graphDB.SourceNode() if err != nil { return err } @@ -3247,8 +3244,8 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, ReadPool: s.readPool, Switch: s.htlcSwitch, InterceptSwitch: s.interceptableSwitch, - ChannelDB: s.remoteChanDB, - ChannelGraph: s.localChanDB.ChannelGraph(), + ChannelDB: s.chanStateDB, + ChannelGraph: s.graphDB, ChainArb: s.chainArb, AuthGossiper: s.authGossiper, ChanStatusMgr: s.chanStatusMgr, @@ -3906,7 +3903,7 @@ func (s *server) fetchNodeAdvertisedAddr(pub *btcec.PublicKey) (net.Addr, error) return nil, err } - node, err := s.localChanDB.ChannelGraph().FetchLightningNode(nil, vertex) + node, err := s.graphDB.FetchLightningNode(nil, vertex) if err != nil { return nil, err } diff --git a/subrpcserver_config.go b/subrpcserver_config.go index 14671f53c63..bf5911ec215 100644 --- a/subrpcserver_config.go +++ b/subrpcserver_config.go @@ -92,8 +92,8 @@ func (s *subRPCServerConfigs) PopulateDependencies(cfg *Config, chanRouter *routing.ChannelRouter, routerBackend *routerrpc.RouterBackend, nodeSigner *netann.NodeSigner, - localChanDB *channeldb.DB, - remoteChanDB *channeldb.DB, + graphDB *channeldb.ChannelGraph, + chanStateDB *channeldb.DB, sweeper *sweep.UtxoSweeper, tower *watchtower.Standalone, towerClient wtclient.Client, @@ -222,11 +222,11 @@ func (s *subRPCServerConfigs) PopulateDependencies(cfg *Config, subCfgValue.FieldByName("DefaultCLTVExpiry").Set( reflect.ValueOf(defaultDelta), ) - subCfgValue.FieldByName("LocalChanDB").Set( - reflect.ValueOf(localChanDB), + subCfgValue.FieldByName("GraphDB").Set( + reflect.ValueOf(graphDB), ) - subCfgValue.FieldByName("RemoteChanDB").Set( - reflect.ValueOf(remoteChanDB), + subCfgValue.FieldByName("ChanStateDB").Set( + reflect.ValueOf(chanStateDB), ) subCfgValue.FieldByName("GenInvoiceFeatures").Set( reflect.ValueOf(genInvoiceFeatures), diff --git 
a/walletunlocker/service.go b/walletunlocker/service.go index 9865795c384..b5d76868659 100644 --- a/walletunlocker/service.go +++ b/walletunlocker/service.go @@ -13,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/aezeed" "github.com/lightningnetwork/lnd/chanbackup" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/kvdb" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/btcwallet" @@ -124,30 +125,28 @@ type UnlockerService struct { // the WalletUnlocker service. MacResponseChan chan []byte - chainDir string - noFreelistSync bool - netParams *chaincfg.Params + netParams *chaincfg.Params // macaroonFiles is the path to the three generated macaroons with // different access permissions. These might not exist in a stateless // initialization of lnd. macaroonFiles []string - // dbTimeout specifies the timeout value to use when opening the wallet - // database. - dbTimeout time.Duration - // resetWalletTransactions indicates that the wallet state should be // reset on unlock to force a full chain rescan. resetWalletTransactions bool // LoaderOpts holds the functional options for the wallet loader. loaderOpts []btcwallet.LoaderOption + + // macaroonDB is an instance of a database backend that stores all + // macaroon root keys. This will be nil on initialization and must be + // set using the SetMacaroonDB method as soon as it's available. + macaroonDB kvdb.Backend } // New creates and returns a new UnlockerService. 
-func New(chainDir string, params *chaincfg.Params, noFreelistSync bool, - macaroonFiles []string, dbTimeout time.Duration, +func New(params *chaincfg.Params, macaroonFiles []string, resetWalletTransactions bool, loaderOpts []btcwallet.LoaderOption) *UnlockerService { @@ -158,11 +157,8 @@ func New(chainDir string, params *chaincfg.Params, noFreelistSync bool, // Make sure we buffer the channel is buffered so the main lnd // goroutine isn't blocking on writing to it. MacResponseChan: make(chan []byte, 1), - chainDir: chainDir, netParams: params, macaroonFiles: macaroonFiles, - dbTimeout: dbTimeout, - noFreelistSync: noFreelistSync, resetWalletTransactions: resetWalletTransactions, loaderOpts: loaderOpts, } @@ -174,6 +170,12 @@ func (u *UnlockerService) SetLoaderOpts(loaderOpts []btcwallet.LoaderOption) { u.loaderOpts = loaderOpts } +// SetMacaroonDB can be used to inject the macaroon database after the unlocker +// service has been hooked to the main RPC server. +func (u *UnlockerService) SetMacaroonDB(macaroonDB kvdb.Backend) { + u.macaroonDB = macaroonDB +} + func (u *UnlockerService) newLoader(recoveryWindow uint32) (*wallet.Loader, error) { @@ -265,7 +267,7 @@ func (u *UnlockerService) GenSeed(_ context.Context, } return &lnrpc.GenSeedResponse{ - CipherSeedMnemonic: []string(mnemonic[:]), + CipherSeedMnemonic: mnemonic[:], EncipheredSeed: encipheredSeed[:], }, nil } @@ -286,9 +288,7 @@ func extractChanBackups(chanBackups *lnrpc.ChanBackupSnapshot) *ChannelsToRecove var backups ChannelsToRecover if chanBackups.MultiChanBackup != nil { multiBackup := chanBackups.MultiChanBackup - backups.PackedMultiChanBackup = chanbackup.PackedMulti( - multiBackup.MultiChanBackup, - ) + backups.PackedMultiChanBackup = multiBackup.MultiChanBackup } if chanBackups.SingleChanBackups == nil { @@ -609,9 +609,8 @@ func (u *UnlockerService) ChangePassword(ctx context.Context, // then close it again. // Attempt to open the macaroon DB, unlock it and then change // the passphrase. 
- netDir := btcwallet.NetworkDir(u.chainDir, u.netParams) macaroonService, err := macaroons.NewService( - netDir, "lnd", in.StatelessInit, u.dbTimeout, + u.macaroonDB, "lnd", in.StatelessInit, ) if err != nil { return nil, err diff --git a/walletunlocker/service_test.go b/walletunlocker/service_test.go index bacf1b5ba15..85403fb0bbd 100644 --- a/walletunlocker/service_test.go +++ b/walletunlocker/service_test.go @@ -113,7 +113,7 @@ func openOrCreateTestMacStore(tempDir string, pw *[]byte, return nil, err } db, err := kvdb.Create( - kvdb.BoltBackendName, path.Join(netDir, macaroons.DBFilename), + kvdb.BoltBackendName, path.Join(netDir, "macaroons.db"), true, kvdb.DefaultDBTimeout, ) if err != nil { @@ -154,8 +154,7 @@ func TestGenSeed(t *testing.T) { }() service := walletunlocker.New( - testDir, testNetParams, true, nil, kvdb.DefaultDBTimeout, - false, testLoaderOpts(testDir), + testNetParams, nil, false, testLoaderOpts(testDir), ) // Now that the service has been created, we'll ask it to generate a @@ -192,8 +191,7 @@ func TestGenSeedGenerateEntropy(t *testing.T) { _ = os.RemoveAll(testDir) }() service := walletunlocker.New( - testDir, testNetParams, true, nil, kvdb.DefaultDBTimeout, - false, testLoaderOpts(testDir), + testNetParams, nil, false, testLoaderOpts(testDir), ) // Now that the service has been created, we'll ask it to generate a @@ -229,8 +227,7 @@ func TestGenSeedInvalidEntropy(t *testing.T) { _ = os.RemoveAll(testDir) }() service := walletunlocker.New( - testDir, testNetParams, true, nil, kvdb.DefaultDBTimeout, - false, testLoaderOpts(testDir), + testNetParams, nil, false, testLoaderOpts(testDir), ) // Now that the service has been created, we'll ask it to generate a @@ -263,8 +260,7 @@ func TestInitWallet(t *testing.T) { // Create new UnlockerService. 
service := walletunlocker.New( - testDir, testNetParams, true, nil, kvdb.DefaultDBTimeout, - false, testLoaderOpts(testDir), + testNetParams, nil, false, testLoaderOpts(testDir), ) // Once we have the unlocker service created, we'll now instantiate a @@ -352,8 +348,7 @@ func TestCreateWalletInvalidEntropy(t *testing.T) { // Create new UnlockerService. service := walletunlocker.New( - testDir, testNetParams, true, nil, kvdb.DefaultDBTimeout, - false, testLoaderOpts(testDir), + testNetParams, nil, false, testLoaderOpts(testDir), ) // We'll attempt to init the wallet with an invalid cipher seed and @@ -385,8 +380,7 @@ func TestUnlockWallet(t *testing.T) { // Create new UnlockerService that'll also drop the wallet's history on // unlock. service := walletunlocker.New( - testDir, testNetParams, true, nil, kvdb.DefaultDBTimeout, - true, testLoaderOpts(testDir), + testNetParams, nil, true, testLoaderOpts(testDir), ) ctx := context.Background() @@ -477,9 +471,9 @@ func TestChangeWalletPasswordNewRootkey(t *testing.T) { // Create a new UnlockerService with our temp files. service := walletunlocker.New( - testDir, testNetParams, true, tempFiles, kvdb.DefaultDBTimeout, - false, testLoaderOpts(testDir), + testNetParams, tempFiles, false, testLoaderOpts(testDir), ) + service.SetMacaroonDB(store.Backend) ctx := context.Background() newPassword := []byte("hunter2???") @@ -588,10 +582,11 @@ func TestChangeWalletPasswordStateless(t *testing.T) { // Create a new UnlockerService with our temp files. service := walletunlocker.New( - testDir, testNetParams, true, []string{ + testNetParams, []string{ tempMacFile, nonExistingFile, - }, kvdb.DefaultDBTimeout, false, testLoaderOpts(testDir), + }, false, testLoaderOpts(testDir), ) + service.SetMacaroonDB(store.Backend) // Create a wallet we can try to unlock. 
We use the default password // so we can check that the unlocker service defaults to this when diff --git a/watchtower/wtdb/client_db.go b/watchtower/wtdb/client_db.go index 7f98a69850e..101c792e97b 100644 --- a/watchtower/wtdb/client_db.go +++ b/watchtower/wtdb/client_db.go @@ -6,7 +6,6 @@ import ( "fmt" "math" "net" - "time" "github.com/btcsuite/btcd/btcec" "github.com/lightningnetwork/lnd/kvdb" @@ -14,11 +13,6 @@ import ( "github.com/lightningnetwork/lnd/watchtower/blob" ) -const ( - // clientDBName is the filename of client database. - clientDBName = "wtclient.db" -) - var ( // cSessionKeyIndexBkt is a top-level bucket storing: // tower-id -> reserved-session-key-index (uint32). @@ -116,11 +110,43 @@ var ( ErrLastTowerAddr = errors.New("cannot remove last tower address") ) +// NewBoltBackendCreator returns a function that creates a new bbolt backend for +// the watchtower database. +func NewBoltBackendCreator(active bool, dbPath, + dbFileName string) func(boltCfg *kvdb.BoltConfig) (kvdb.Backend, error) { + + // If the watchtower client isn't active, we return a function that + // always returns a nil DB to make sure we don't create empty database + // files. + if !active { + return func(_ *kvdb.BoltConfig) (kvdb.Backend, error) { + return nil, nil + } + } + + return func(boltCfg *kvdb.BoltConfig) (kvdb.Backend, error) { + cfg := &kvdb.BoltBackendConfig{ + DBPath: dbPath, + DBFileName: dbFileName, + NoFreelistSync: !boltCfg.SyncFreelist, + AutoCompact: boltCfg.AutoCompact, + AutoCompactMinAge: boltCfg.AutoCompactMinAge, + DBTimeout: boltCfg.DBTimeout, + } + + db, err := kvdb.GetBoltBackend(cfg) + if err != nil { + return nil, fmt.Errorf("could not open boltdb: %v", err) + } + + return db, nil + } +} + // ClientDB is single database providing a persistent storage engine for the // wtclient. 
type ClientDB struct { - db kvdb.Backend - dbPath string + db kvdb.Backend } // OpenClientDB opens the client database given the path to the database's @@ -130,22 +156,19 @@ type ClientDB struct { // migrations will be applied before returning. Any attempt to open a database // with a version number higher that the latest version will fail to prevent // accidental reversion. -func OpenClientDB(dbPath string, dbTimeout time.Duration) (*ClientDB, error) { - bdb, firstInit, err := createDBIfNotExist( - dbPath, clientDBName, dbTimeout, - ) +func OpenClientDB(db kvdb.Backend) (*ClientDB, error) { + firstInit, err := isFirstInit(db) if err != nil { return nil, err } clientDB := &ClientDB{ - db: bdb, - dbPath: dbPath, + db: db, } err = initOrSyncVersions(clientDB, firstInit, clientDBVersions) if err != nil { - bdb.Close() + db.Close() return nil, err } @@ -156,7 +179,7 @@ func OpenClientDB(dbPath string, dbTimeout time.Duration) (*ClientDB, error) { // missing, this will trigger a ErrUninitializedDB error. err = kvdb.Update(clientDB.db, initClientDBBuckets, func() {}) if err != nil { - bdb.Close() + db.Close() return nil, err } diff --git a/watchtower/wtdb/client_db_test.go b/watchtower/wtdb/client_db_test.go index 448ae45d234..0f9235c4d96 100644 --- a/watchtower/wtdb/client_db_test.go +++ b/watchtower/wtdb/client_db_test.go @@ -771,6 +771,7 @@ func checkAckedUpdates(t *testing.T, session *wtdb.ClientSession, // and the mock implementation. This ensures that all databases function // identically, especially in the negative paths. 
func TestClientDB(t *testing.T) { + dbCfg := &kvdb.BoltConfig{DBTimeout: kvdb.DefaultDBTimeout} dbs := []struct { name string init clientDBInit @@ -784,9 +785,15 @@ func TestClientDB(t *testing.T) { err) } - db, err := wtdb.OpenClientDB( - path, kvdb.DefaultDBTimeout, - ) + bdb, err := wtdb.NewBoltBackendCreator( + true, path, "wtclient.db", + )(dbCfg) + if err != nil { + os.RemoveAll(path) + t.Fatalf("unable to open db: %v", err) + } + + db, err := wtdb.OpenClientDB(bdb) if err != nil { os.RemoveAll(path) t.Fatalf("unable to open db: %v", err) @@ -809,18 +816,30 @@ func TestClientDB(t *testing.T) { err) } - db, err := wtdb.OpenClientDB( - path, kvdb.DefaultDBTimeout, - ) + bdb, err := wtdb.NewBoltBackendCreator( + true, path, "wtclient.db", + )(dbCfg) + if err != nil { + os.RemoveAll(path) + t.Fatalf("unable to open db: %v", err) + } + + db, err := wtdb.OpenClientDB(bdb) if err != nil { os.RemoveAll(path) t.Fatalf("unable to open db: %v", err) } db.Close() - db, err = wtdb.OpenClientDB( - path, kvdb.DefaultDBTimeout, - ) + bdb, err = wtdb.NewBoltBackendCreator( + true, path, "wtclient.db", + )(dbCfg) + if err != nil { + os.RemoveAll(path) + t.Fatalf("unable to open db: %v", err) + } + + db, err = wtdb.OpenClientDB(bdb) if err != nil { os.RemoveAll(path) t.Fatalf("unable to reopen db: %v", err) diff --git a/watchtower/wtdb/db_common.go b/watchtower/wtdb/db_common.go index de14bff64ec..68df9630591 100644 --- a/watchtower/wtdb/db_common.go +++ b/watchtower/wtdb/db_common.go @@ -3,18 +3,10 @@ package wtdb import ( "encoding/binary" "errors" - "os" - "path/filepath" - "time" "github.com/lightningnetwork/lnd/kvdb" ) -const ( - // dbFilePermission requests read+write access to the db file. - dbFilePermission = 0600 -) - var ( // metadataBkt stores all the meta information concerning the state of // the database. @@ -35,67 +27,19 @@ var ( byteOrder = binary.BigEndian ) -// fileExists returns true if the file exists, and false otherwise. 
-func fileExists(path string) bool { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - return false - } - } - - return true -} - -// createDBIfNotExist opens the boltdb database at dbPath/name, creating one if -// one doesn't exist. The boolean returned indicates if the database did not -// exist before, or if it has been created but no version metadata exists within -// it. -func createDBIfNotExist(dbPath, name string, - dbTimeout time.Duration) (kvdb.Backend, bool, error) { - - path := filepath.Join(dbPath, name) - - // If the database file doesn't exist, this indicates we much initialize - // a fresh database with the latest version. - firstInit := !fileExists(path) - if firstInit { - // Ensure all parent directories are initialized. - err := os.MkdirAll(dbPath, 0700) - if err != nil { - return nil, false, err - } - } - - // Specify bbolt freelist options to reduce heap pressure in case the - // freelist grows to be very large. - bdb, err := kvdb.Create( - kvdb.BoltBackendName, path, true, dbTimeout, - ) +// isFirstInit returns true if the given database has not yet been initialized, +// e.g. no metadata bucket is present yet. +func isFirstInit(db kvdb.Backend) (bool, error) { + var metadataExists bool + err := kvdb.View(db, func(tx kvdb.RTx) error { + metadataExists = tx.ReadBucket(metadataBkt) != nil + return nil + }, func() { + metadataExists = false + }) if err != nil { - return nil, false, err - } - - // If the file existed previously, we'll now check to see that the - // metadata bucket is properly initialized. It could be the case that - // the database was created, but we failed to actually populate any - // metadata. If the metadata bucket does not actually exist, we'll - // set firstInit to true so that we can treat is initialize the bucket. 
- if !firstInit { - var metadataExists bool - err = kvdb.View(bdb, func(tx kvdb.RTx) error { - metadataExists = tx.ReadBucket(metadataBkt) != nil - return nil - }, func() { - metadataExists = false - }) - if err != nil { - return nil, false, err - } - - if !metadataExists { - firstInit = true - } + return false, err } - return bdb, firstInit, nil + return !metadataExists, nil } diff --git a/watchtower/wtdb/tower_db.go b/watchtower/wtdb/tower_db.go index 6f67298f800..50d491411da 100644 --- a/watchtower/wtdb/tower_db.go +++ b/watchtower/wtdb/tower_db.go @@ -3,7 +3,6 @@ package wtdb import ( "bytes" "errors" - "time" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/lightningnetwork/lnd/chainntnfs" @@ -11,11 +10,6 @@ import ( "github.com/lightningnetwork/lnd/watchtower/blob" ) -const ( - // towerDBName is the filename of tower database. - towerDBName = "watchtower.db" -) - var ( // sessionsBkt is a bucket containing all negotiated client sessions. // session id -> session @@ -56,8 +50,7 @@ var ( // TowerDB is single database providing a persistent storage engine for the // wtserver and lookout subsystems. type TowerDB struct { - db kvdb.Backend - dbPath string + db kvdb.Backend } // OpenTowerDB opens the tower database given the path to the database's @@ -67,22 +60,19 @@ type TowerDB struct { // migrations will be applied before returning. Any attempt to open a database // with a version number higher that the latest version will fail to prevent // accidental reversion. 
-func OpenTowerDB(dbPath string, dbTimeout time.Duration) (*TowerDB, error) { - bdb, firstInit, err := createDBIfNotExist( - dbPath, towerDBName, dbTimeout, - ) +func OpenTowerDB(db kvdb.Backend) (*TowerDB, error) { + firstInit, err := isFirstInit(db) if err != nil { return nil, err } towerDB := &TowerDB{ - db: bdb, - dbPath: dbPath, + db: db, } err = initOrSyncVersions(towerDB, firstInit, towerDBVersions) if err != nil { - bdb.Close() + db.Close() return nil, err } @@ -93,7 +83,7 @@ func OpenTowerDB(dbPath string, dbTimeout time.Duration) (*TowerDB, error) { // missing, this will trigger a ErrUninitializedDB error. err = kvdb.Update(towerDB.db, initTowerDBBuckets, func() {}) if err != nil { - bdb.Close() + db.Close() return nil, err } diff --git a/watchtower/wtdb/tower_db_test.go b/watchtower/wtdb/tower_db_test.go index 7debe8a0e69..413f7c43bd0 100644 --- a/watchtower/wtdb/tower_db_test.go +++ b/watchtower/wtdb/tower_db_test.go @@ -630,6 +630,7 @@ var stateUpdateInvalidBlobSize = stateUpdateTest{ } func TestTowerDB(t *testing.T) { + dbCfg := &kvdb.BoltConfig{DBTimeout: kvdb.DefaultDBTimeout} dbs := []struct { name string init dbInit @@ -643,9 +644,15 @@ func TestTowerDB(t *testing.T) { err) } - db, err := wtdb.OpenTowerDB( - path, kvdb.DefaultDBTimeout, - ) + bdb, err := wtdb.NewBoltBackendCreator( + true, path, "watchtower.db", + )(dbCfg) + if err != nil { + os.RemoveAll(path) + t.Fatalf("unable to open db: %v", err) + } + + db, err := wtdb.OpenTowerDB(bdb) if err != nil { os.RemoveAll(path) t.Fatalf("unable to open db: %v", err) @@ -668,9 +675,15 @@ func TestTowerDB(t *testing.T) { err) } - db, err := wtdb.OpenTowerDB( - path, kvdb.DefaultDBTimeout, - ) + bdb, err := wtdb.NewBoltBackendCreator( + true, path, "watchtower.db", + )(dbCfg) + if err != nil { + os.RemoveAll(path) + t.Fatalf("unable to open db: %v", err) + } + + db, err := wtdb.OpenTowerDB(bdb) if err != nil { os.RemoveAll(path) t.Fatalf("unable to open db: %v", err) @@ -680,9 +693,15 @@ func 
TestTowerDB(t *testing.T) { // Open the db again, ensuring we test a // different path during open and that all // buckets remain initialized. - db, err = wtdb.OpenTowerDB( - path, kvdb.DefaultDBTimeout, - ) + bdb, err = wtdb.NewBoltBackendCreator( + true, path, "watchtower.db", + )(dbCfg) + if err != nil { + os.RemoveAll(path) + t.Fatalf("unable to open db: %v", err) + } + + db, err = wtdb.OpenTowerDB(bdb) if err != nil { os.RemoveAll(path) t.Fatalf("unable to open db: %v", err)