diff --git a/itest/list_on_test.go b/itest/list_on_test.go
index 5a12cee4b64..cb5bd680bd0 100644
--- a/itest/list_on_test.go
+++ b/itest/list_on_test.go
@@ -698,4 +698,8 @@ var allTestCases = []*lntest.TestCase{
 		Name:     "send to route failed htlc timeout",
 		TestFunc: testSendToRouteFailHTLCTimeout,
 	},
+	{
+		Name:     "invoice migration",
+		TestFunc: testInvoiceMigration,
+	},
 }
diff --git a/itest/lnd_invoice_migration_test.go b/itest/lnd_invoice_migration_test.go
new file mode 100644
index 00000000000..6bacfb99610
--- /dev/null
+++ b/itest/lnd_invoice_migration_test.go
@@ -0,0 +1,261 @@
+package itest
+
+import (
+	"database/sql"
+	"path"
+
+	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/clock"
+	"github.com/lightningnetwork/lnd/invoices"
+	"github.com/lightningnetwork/lnd/kvdb"
+	"github.com/lightningnetwork/lnd/kvdb/postgres"
+	"github.com/lightningnetwork/lnd/kvdb/sqlbase"
+	"github.com/lightningnetwork/lnd/kvdb/sqlite"
+	"github.com/lightningnetwork/lnd/lncfg"
+	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
+	"github.com/lightningnetwork/lnd/lntest"
+	"github.com/lightningnetwork/lnd/lntest/node"
+	"github.com/lightningnetwork/lnd/sqldb"
+	"github.com/stretchr/testify/require"
+)
+
+func openChannelDB(ht *lntest.HarnessTest, hn *node.HarnessNode) *channeldb.DB {
+	sqlbase.Init(0)
+
+	var (
+		backend kvdb.Backend
+		err     error
+	)
+
+	switch hn.Cfg.DBBackend {
+	case node.BackendSqlite:
+		backend, err = kvdb.Open(
+			kvdb.SqliteBackendName,
+			ht.Context(),
+			&sqlite.Config{
+				Timeout:     defaultTimeout,
+				BusyTimeout: defaultTimeout,
+			},
+			hn.Cfg.DBDir(), lncfg.SqliteChannelDBName,
+			lncfg.NSChannelDB,
+		)
+		require.NoError(ht, err)
+
+	case node.BackendPostgres:
+		backend, err = kvdb.Open(
+			kvdb.PostgresBackendName, ht.Context(),
+			&postgres.Config{
+				Dsn:     hn.Cfg.PostgresDsn,
+				Timeout: defaultTimeout,
+			}, lncfg.NSChannelDB,
+		)
+		require.NoError(ht, err)
+	}
+
+	db, err := channeldb.CreateWithBackend(backend)
+	require.NoError(ht, err)
+
+	return db
+}
+
+func openNativeSQLInvoiceDB(ht *lntest.HarnessTest,
+	hn *node.HarnessNode) invoices.InvoiceDB {
+
+	var db *sqldb.BaseDB
+
+	switch hn.Cfg.DBBackend {
+	case node.BackendSqlite:
+		sqliteStore, err := sqldb.NewSqliteStore(
+			&sqldb.SqliteConfig{
+				Timeout:     defaultTimeout,
+				BusyTimeout: defaultTimeout,
+			},
+			path.Join(
+				hn.Cfg.DBDir(),
+				lncfg.SqliteNativeDBName,
+			),
+		)
+		require.NoError(ht, err)
+		db = sqliteStore.BaseDB
+
+	case node.BackendPostgres:
+		postgresStore, err := sqldb.NewPostgresStore(
+			&sqldb.PostgresConfig{
+				Dsn:     hn.Cfg.PostgresDsn,
+				Timeout: defaultTimeout,
+			},
+		)
+		require.NoError(ht, err)
+		db = postgresStore.BaseDB
+	}
+
+	executor := sqldb.NewTransactionExecutor(
+		db, func(tx *sql.Tx) invoices.SQLInvoiceQueries {
+			return db.WithTx(tx)
+		},
+	)
+
+	return invoices.NewSQLStore(
+		executor, clock.NewDefaultClock(),
+	)
+}
+
+// testInvoiceMigration tests that the invoice migration from the old KV store
+// to the new native SQL store works as expected.
+func testInvoiceMigration(ht *lntest.HarnessTest) {
+	alice, bob := ht.Alice, ht.Bob
+
+	// Make sure we run the test with SQLite or Postgres.
+	if bob.Cfg.DBBackend != node.BackendSqlite &&
+		bob.Cfg.DBBackend != node.BackendPostgres {
+
+		ht.Skip("node not running with SQLite or Postgres")
+	}
+
+	// Skip the test if the node is already running with native SQL.
+	if bob.Cfg.NativeSQL {
+		ht.Skip("node already running with native SQL")
+	}
+
+	ht.EnsureConnected(alice, bob)
+	cp := ht.OpenChannel(
+		alice, bob, lntest.OpenChannelParams{
+			Amt:     1000000,
+			PushAmt: 500000,
+		},
+	)
+
+	// Alice and Bob should have one channel open with each other now.
+	ht.AssertNodeNumChannels(alice, 1)
+	ht.AssertNodeNumChannels(bob, 1)
+
+	// Restart Bob with spontaneous AMP payments enabled so that the AMP
+	// payments with an external payment address below result in new
+	// invoices being inserted on the fly.
+	ht.RestartNodeWithExtraArgs(bob, []string{
+		"--accept-amp",
+	})
+
+	// Step 1: Add 10 normal invoices and pay all of them.
+	normalInvoices := make([]*lnrpc.AddInvoiceResponse, 10)
+	for i := 0; i < 10; i++ {
+		invoice := &lnrpc.Invoice{
+			Value: int64(1000 + i*100), // Varying amounts
+			IsAmp: false,
+		}
+
+		resp := bob.RPC.AddInvoice(invoice)
+		normalInvoices[i] = resp
+	}
+
+	for _, inv := range normalInvoices {
+		sendReq := &routerrpc.SendPaymentRequest{
+			PaymentRequest: inv.PaymentRequest,
+			TimeoutSeconds: 60,
+			FeeLimitMsat:   noFeeLimitMsat,
+		}
+
+		ht.SendPaymentAssertSettled(alice, sendReq)
+	}
+
+	// Step 2: Add 10 AMP invoices and send multiple payments to 5 of them.
+	ampInvoices := make([]*lnrpc.AddInvoiceResponse, 10)
+	for i := 0; i < 10; i++ {
+		invoice := &lnrpc.Invoice{
+			Value: int64(2000 + i*200), // Varying amounts
+			IsAmp: true,
+		}
+
+		resp := bob.RPC.AddInvoice(invoice)
+		ampInvoices[i] = resp
+	}
+
+	// Select the first 5 invoices to send multiple payments to.
+	for i := 0; i < 5; i++ {
+		inv := ampInvoices[i]
+
+		for j := 0; j < 3; j++ { // Send 3 payments to each
+			payReq := &routerrpc.SendPaymentRequest{
+				PaymentRequest: inv.PaymentRequest,
+				TimeoutSeconds: 60,
+				FeeLimitMsat:   noFeeLimitMsat,
+				Amp:            true,
+			}
+
+			ht.SendPaymentAssertSettled(alice, payReq)
+
+			// Generate an external payment address when attempting
+			// to pseudo-reuse an AMP invoice. When using an
+			// external payment address, we'll also expect an extra
+			// invoice to appear in the ListInvoices response, since
+			// a new invoice will be JIT inserted under a different
+			// payment address than the one in the invoice.
+			//
+			// NOTE: This will only work when the peer has
+			// spontaneous AMP payments enabled otherwise no invoice
+			// under a different payment_addr will be found.
+			payReq.PaymentAddr = ht.Random32Bytes()
+			ht.SendPaymentAssertSettled(alice, payReq)
+		}
+	}
+
+	// Now list invoices and assert that we have 35 in total: 10 normal
+	// invoices, 10 AMP invoices and the 15 extra invoices that were JIT
+	// inserted for the AMP payments with an external payment address.
+	bobInvoices := bob.RPC.ListInvoices(&lnrpc.ListInvoiceRequest{})
+	require.Len(ht, bobInvoices.Invoices, 35)
+
+	// We can close the channel now.
+	ht.CloseChannel(alice, cp)
+
+	// Now stop Bob so we can open the DB for examination.
+	require.NoError(ht, bob.Stop())
+
+	db := openChannelDB(ht, bob)
+
+	query := invoices.InvoiceQuery{
+		IndexOffset: 0,
+		// As a sanity check, fetch more invoices than we have
+		// to ensure that we did not add any extra invoices.
+		NumMaxInvoices: 9999,
+	}
+
+	// Fetch all invoices and make sure we have 35 in total.
+	result1, err := db.QueryInvoices(ht.Context(), query)
+	require.NoError(ht, err)
+
+	numInvoices := len(result1.Invoices)
+	require.Equal(ht, 35, numInvoices)
+
+	bob.SetExtraArgs([]string{"--db.use-native-sql"})
+
+	// Now run the migration flow three times to ensure that each run is
+	// idempotent.
+	for i := 0; i < 3; i++ {
+		// Start Bob with the native SQL flag set. This will trigger
+		// the migration to run.
+		require.NoError(ht, bob.Start(ht.Context()))
+
+		// At this point the migration should have completed and the
+		// node should be running with native SQL. Now we'll stop Bob
+		// again so we can safely examine the database.
+		require.NoError(ht, bob.Stop())
+
+		// Now we'll open the database with the native SQL backend and
+		// fetch the invoices again to ensure that they were migrated
+		// correctly.
+		sqlInvoiceDB := openNativeSQLInvoiceDB(ht, bob)
+		result2, err := sqlInvoiceDB.QueryInvoices(ht.Context(), query)
+		require.NoError(ht, err)
+
+		require.Equal(ht, numInvoices, len(result2.Invoices))
+
+		// Simply zero out the add index so we don't fail on that when
+		// comparing.
+		for j := 0; j < numInvoices; j++ {
+			result1.Invoices[j].AddIndex = 0
+			result2.Invoices[j].AddIndex = 0
+			require.Equal(
+				ht, result1.Invoices[j], result2.Invoices[j],
+			)
+		}
+	}
+
+	// Start Bob again so the test can complete.
+	require.NoError(ht, bob.Start(ht.Context()))
+}