From b565ce02fde3e13656759631923e3beb9b124540 Mon Sep 17 00:00:00 2001
From: ziggie
Date: Fri, 5 Dec 2025 14:32:00 +0100
Subject: [PATCH 1/2] multi: change sql db max connection defaults

---
 invoices/kv_sql_migration_test.go |  2 +-
 kvdb/kvdb_sqlite.go               |  5 ++++-
 kvdb/postgres/fixture.go          |  8 +++++++-
 lncfg/db.go                       |  2 +-
 sample-lnd.conf                   |  2 +-
 sqldb/config.go                   | 11 +++++++++--
 sqldb/postgres.go                 |  2 +-
 sqldb/postgres_fixture.go         |  1 +
 sqldb/sqlite.go                   |  9 +++++++--
 9 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/invoices/kv_sql_migration_test.go b/invoices/kv_sql_migration_test.go
index 709a3c8e106..9c208a0fa2f 100644
--- a/invoices/kv_sql_migration_test.go
+++ b/invoices/kv_sql_migration_test.go
@@ -148,7 +148,7 @@ func TestMigrationWithChannelDB(t *testing.T) {
 	// Just some sane defaults for the sqlite config.
 	const (
 		timeout  = 5 * time.Second
-		maxConns = 50
+		maxConns = 2
 	)
 
 	sqliteConfig := &sqlite.Config{
diff --git a/kvdb/kvdb_sqlite.go b/kvdb/kvdb_sqlite.go
index ec880da63ce..9e89b80ab8d 100644
--- a/kvdb/kvdb_sqlite.go
+++ b/kvdb/kvdb_sqlite.go
@@ -17,7 +17,10 @@ const (
 	// tag is defined. This will allow testing of other database backends.
 	SqliteBackend = true
 
-	testMaxConnections = 50
+	// SQLite allows concurrent reads, but writes are limited to a single
+	// writer at a time. We therefore pick 2 connections: enough for
+	// concurrent reads while keeping write contention low.
+	testMaxConnections = 2
 )
 
 // StartSqliteTestBackend starts a sqlite backed wallet.DB instance
diff --git a/kvdb/postgres/fixture.go b/kvdb/postgres/fixture.go
index 449ba8de662..b8a0e4c6f01 100644
--- a/kvdb/postgres/fixture.go
+++ b/kvdb/postgres/fixture.go
@@ -27,13 +27,18 @@ func getTestDsn(dbName string) string {
 
 var testPostgres *embeddedpostgres.EmbeddedPostgres
 
+// testMaxConnections is the total number of connections to the database server.
 const testMaxConnections = 200
 
+// testMaxConnectionPerDatabase is the number of connections the Go driver
+// will open per database.
+const testMaxConnectionPerDatabase = 10
+
 // StartEmbeddedPostgres starts an embedded postgres instance. This only needs
 // to be done once, because NewFixture will create random new databases on every
 // call. It returns a stop closure that stops the database if called.
 func StartEmbeddedPostgres() (func() error, error) {
-	sqlbase.Init(testMaxConnections)
+	sqlbase.Init(testMaxConnectionPerDatabase)
 
 	postgres := embeddedpostgres.NewDatabase(
 		embeddedpostgres.DefaultConfig().
@@ -43,6 +48,7 @@ func StartEmbeddedPostgres() (func() error, error) {
 				"max_connections": fmt.Sprintf(
 					"%d", testMaxConnections,
 				),
+				"max_pred_locks_per_transaction": "256",
 			},
 		),
 	)
diff --git a/lncfg/db.go b/lncfg/db.go
index 5bd4d2e19bb..fbdf62f80d1 100644
--- a/lncfg/db.go
+++ b/lncfg/db.go
@@ -39,7 +39,7 @@ const (
 	SqliteBackend              = "sqlite"
 	DefaultBatchCommitInterval = 500 * time.Millisecond
 
-	defaultPostgresMaxConnections = 50
+	defaultPostgresMaxConnections = 10
 	defaultSqliteMaxConnections   = 2
 
 	defaultSqliteBusyTimeout = 5 * time.Second
diff --git a/sample-lnd.conf b/sample-lnd.conf
index c9a2865b858..a0c6b723be8 100644
--- a/sample-lnd.conf
+++ b/sample-lnd.conf
@@ -1618,7 +1618,7 @@
 ; recommended to set a limit that is below the server connection limit.
 ; Otherwise errors may occur in lnd under high-load conditions.
 ; Default:
-; db.postgres.maxconnections=50
+; db.postgres.maxconnections=10
 ; Example:
 ; db.postgres.maxconnections=
 
diff --git a/sqldb/config.go b/sqldb/config.go
index 59801dbeaf1..95234a0d01a 100644
--- a/sqldb/config.go
+++ b/sqldb/config.go
@@ -7,12 +7,19 @@ import (
 )
 
 const (
-	// defaultMaxConns is the number of permitted active and idle
+	// defaultMaxConnsPostgres is the number of permitted active and idle
 	// connections. We want to limit this so it isn't unlimited. We use the
 	// same value for the number of idle connections as, this can speed up
 	// queries given a new connection doesn't need to be established each
 	// time.
-	defaultMaxConns = 25
+	defaultMaxConnsPostgres = 10
+
+	// defaultMaxConnsSqlite is the number of permitted active and idle
+	// connections for SQLite. SQLite allows concurrent reads but only a
+	// single writer at a time, so we limit this to 2: enough to allow
+	// concurrent reads while keeping the number of connections low to
+	// avoid write contention.
+	defaultMaxConnsSqlite = 2
 
 	// connIdleLifetime is the amount of time a connection can be idle.
 	connIdleLifetime = 5 * time.Minute
diff --git a/sqldb/postgres.go b/sqldb/postgres.go
index 70dba82a1a0..66ae8b895e3 100644
--- a/sqldb/postgres.go
+++ b/sqldb/postgres.go
@@ -122,7 +122,7 @@ func NewPostgresStore(cfg *PostgresConfig) (*PostgresStore, error) {
 			err)
 	}
 
-	maxConns := defaultMaxConns
+	maxConns := defaultMaxConnsPostgres
 	if cfg.MaxConnections > 0 {
 		maxConns = cfg.MaxConnections
 	}
diff --git a/sqldb/postgres_fixture.go b/sqldb/postgres_fixture.go
index 91b95d66ef9..42d11c24e0e 100644
--- a/sqldb/postgres_fixture.go
+++ b/sqldb/postgres_fixture.go
@@ -60,6 +60,7 @@ func NewTestPgFixture(t testing.TB, expiry time.Duration) *TestPgFixture {
 			"-c", "log_statement=all",
 			"-c", "log_destination=stderr",
 			"-c", "max_connections=5000",
+			"-c", "max_pred_locks_per_transaction=256",
 		},
 	}, func(config *docker.HostConfig) {
 		// Set AutoRemove to true so that stopped container goes away
diff --git a/sqldb/sqlite.go b/sqldb/sqlite.go
index 2b2f7be1350..6514caee4be 100644
--- a/sqldb/sqlite.go
+++ b/sqldb/sqlite.go
@@ -130,8 +130,13 @@ func NewSqliteStore(cfg *SqliteConfig, dbPath string) (*SqliteStore, error) {
 			err)
 	}
 
-	db.SetMaxOpenConns(defaultMaxConns)
-	db.SetMaxIdleConns(defaultMaxConns)
+	maxConns := defaultMaxConnsSqlite
+	if cfg.MaxConnections > 0 {
+		maxConns = cfg.MaxConnections
+	}
+
+	db.SetMaxOpenConns(maxConns)
+	db.SetMaxIdleConns(maxConns)
 	db.SetConnMaxLifetime(connIdleLifetime)
 
 	queries := sqlc.New(db)

From 214af567aae437d977175fa9c202756e1c1b2bfb Mon Sep 17 00:00:00 2001
From: ziggie
Date: Fri, 5 Dec 2025 17:47:05 +0100
Subject: [PATCH 2/2] sqldb: make sure we do not create a new migration driver
 every time

---
 sqldb/postgres.go | 58 ++++++++++++++++++++++++++++++++++++++++++++++-
 sqldb/sqlite.go   | 53 ++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 109 insertions(+), 2 deletions(-)

diff --git a/sqldb/postgres.go b/sqldb/postgres.go
index 66ae8b895e3..6989976d5ee 100644
--- a/sqldb/postgres.go
+++ b/sqldb/postgres.go
@@ -4,11 +4,13 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
+	"io/fs"
 	"net/url"
 	"path"
 	"strings"
 	"time"
 
+	"github.com/golang-migrate/migrate/v4/database"
 	pgx_migrate "github.com/golang-migrate/migrate/v4/database/pgx/v5"
 	_ "github.com/golang-migrate/migrate/v4/source/file" // Read migrations from files.
 	// nolint:ll
 	_ "github.com/jackc/pgx/v5"
@@ -148,6 +150,33 @@ func (s *PostgresStore) GetBaseDB() *BaseDB {
 	return s.BaseDB
 }
 
+// postgresMigrationExecutor implements MigrationExecutor using a single reusable
+// migrate driver to avoid holding multiple database connections.
+type postgresMigrationExecutor struct {
+	driver database.Driver
+	dbName string
+	fs     fs.FS
+}
+
+// ExecuteMigrations runs migrations using the pre-created driver.
+func (p *postgresMigrationExecutor) ExecuteMigrations(target MigrationTarget) error {
+	return applyMigrations(p.fs, p.driver, "sqlc/migrations", p.dbName, target)
+}
+
+// GetSchemaVersion returns the current schema version using the pre-created driver.
+func (p *postgresMigrationExecutor) GetSchemaVersion() (int, bool, error) {
+	version, dirty, err := p.driver.Version()
+	if err != nil {
+		return 0, false, err
+	}
+	return version, dirty, nil
+}
+
+// SetSchemaVersion sets the schema version using the pre-created driver.
+func (p *postgresMigrationExecutor) SetSchemaVersion(version int, dirty bool) error {
+	return p.driver.SetVersion(version, dirty)
+}
+
 // ApplyAllMigrations applies both the SQLC and custom in-code migrations to the
 // Postgres database.
 func (s *PostgresStore) ApplyAllMigrations(ctx context.Context,
@@ -158,7 +187,30 @@ func (s *PostgresStore) ApplyAllMigrations(ctx context.Context,
 		return nil
 	}
 
-	return ApplyMigrations(ctx, s.BaseDB, s, migrations)
+	// Create a single migrate driver that will be reused for all migration
+	// operations to avoid holding multiple database connections.
+	dbName, err := getDatabaseNameFromDSN(s.cfg.Dsn)
+	if err != nil {
+		return err
+	}
+
+	driver, err := pgx_migrate.WithInstance(s.DB, &pgx_migrate.Config{})
+	if err != nil {
+		return errPostgresMigration(err)
+	}
+
+	// Note: We don't call driver.Close() here because it closes the
+	// underlying *sql.DB. The single connection held by the driver will
+	// be cleaned up when the *sql.DB is closed.
+
+	// Create a migration executor that reuses this driver.
+	executor := &postgresMigrationExecutor{
+		driver: driver,
+		dbName: dbName,
+		fs:     newReplacerFS(sqlSchemas, postgresSchemaReplacements),
+	}
+
+	return ApplyMigrations(ctx, s.BaseDB, executor, migrations)
 }
 
 func errPostgresMigration(err error) error {
@@ -178,6 +230,10 @@ func (s *PostgresStore) ExecuteMigrations(target MigrationTarget) error {
 		return errPostgresMigration(err)
 	}
 
+	// Note: We don't call driver.Close() here because it closes the
+	// underlying *sql.DB. The connection held by the driver will be
+	// cleaned up when the *sql.DB is closed.
+
 	// Populate the database with our set of schemas based on our embedded
 	// in-memory file system.
 	postgresFS := newReplacerFS(sqlSchemas, postgresSchemaReplacements)
diff --git a/sqldb/sqlite.go b/sqldb/sqlite.go
index 6514caee4be..0a4edee6cce 100644
--- a/sqldb/sqlite.go
+++ b/sqldb/sqlite.go
@@ -6,10 +6,12 @@ import (
 	"context"
 	"database/sql"
 	"fmt"
+	"io/fs"
 	"net/url"
 	"path/filepath"
 	"testing"
 
+	"github.com/golang-migrate/migrate/v4/database"
 	sqlite_migrate "github.com/golang-migrate/migrate/v4/database/sqlite"
 	"github.com/lightningnetwork/lnd/sqldb/sqlc"
 	"github.com/stretchr/testify/require"
 )
@@ -157,6 +159,32 @@ func (s *SqliteStore) GetBaseDB() *BaseDB {
 	return s.BaseDB
 }
 
+// sqliteMigrationExecutor implements MigrationExecutor using a single reusable
+// migrate driver to avoid holding multiple database connections.
+type sqliteMigrationExecutor struct {
+	driver database.Driver
+	fs     fs.FS
+}
+
+// ExecuteMigrations runs migrations using the pre-created driver.
+func (s *sqliteMigrationExecutor) ExecuteMigrations(target MigrationTarget) error {
+	return applyMigrations(s.fs, s.driver, "sqlc/migrations", "sqlite", target)
+}
+
+// GetSchemaVersion returns the current schema version using the pre-created driver.
+func (s *sqliteMigrationExecutor) GetSchemaVersion() (int, bool, error) {
+	version, dirty, err := s.driver.Version()
+	if err != nil {
+		return 0, dirty, err
+	}
+	return version, dirty, nil
+}
+
+// SetSchemaVersion sets the schema version using the pre-created driver.
+func (s *sqliteMigrationExecutor) SetSchemaVersion(version int, dirty bool) error {
+	return s.driver.SetVersion(version, dirty)
+}
+
 // ApplyAllMigrations applies both the SQLC and custom in-code migrations to the
 // SQLite database.
 func (s *SqliteStore) ApplyAllMigrations(ctx context.Context,
@@ -167,7 +195,26 @@ func (s *SqliteStore) ApplyAllMigrations(ctx context.Context,
 		return nil
 	}
 
-	return ApplyMigrations(ctx, s.BaseDB, s, migrations)
+	// Create a single migrate driver that will be reused for all migration
+	// operations to avoid holding multiple database connections.
+	driver, err := sqlite_migrate.WithInstance(
+		s.DB, &sqlite_migrate.Config{},
+	)
+	if err != nil {
+		return errSqliteMigration(err)
+	}
+
+	// Note: We don't call driver.Close() here because it closes the
+	// underlying *sql.DB. The single connection held by the driver will
+	// be cleaned up when the *sql.DB is closed.
+
+	// Create a migration executor that reuses this driver.
+	executor := &sqliteMigrationExecutor{
+		driver: driver,
+		fs:     newReplacerFS(sqlSchemas, sqliteSchemaReplacements),
+	}
+
+	return ApplyMigrations(ctx, s.BaseDB, executor, migrations)
 }
 
 func errSqliteMigration(err error) error {
@@ -184,6 +231,10 @@ func (s *SqliteStore) ExecuteMigrations(target MigrationTarget) error {
 		return errSqliteMigration(err)
 	}
 
+	// Note: We intentionally don't call driver.Close() here because it would
+	// close the underlying *sql.DB that we passed to WithInstance. The
+	// driver will be cleaned up when the *sql.DB is closed.
+
 	// Populate the database with our set of schemas based on our embedded
 	// in-memory file system.
 	sqliteFS := newReplacerFS(sqlSchemas, sqliteSchemaReplacements)
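
For context on the new SQLite default of two connections: SQLite permits many concurrent readers but only one writer at a time, so a large database/sql pool mostly adds lock contention. The standalone sketch below is not part of the patch; the DSN, the demo file name and the driver choice (modernc.org/sqlite, which registers itself as "sqlite") are assumptions for illustration only. It applies the same pool settings that NewSqliteStore uses above.

package main

import (
	"database/sql"
	"log"
	"time"

	_ "modernc.org/sqlite" // Assumed driver; it registers the "sqlite" name.
)

func main() {
	// Illustrative on-disk database; any SQLite DSN works here.
	db, err := sql.Open("sqlite", "file:demo.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Mirror the patch's defaults: two connections are enough for one
	// writer plus one concurrent reader, and a small pool avoids
	// contention on SQLite's single write lock.
	const maxConns = 2
	db.SetMaxOpenConns(maxConns)
	db.SetMaxIdleConns(maxConns) // Keep idle conns around to avoid re-dialing.
	db.SetConnMaxLifetime(5 * time.Minute)

	// A quick sanity check that the pool is usable.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("sqlite pool configured with", maxConns, "connections")
}

Deployments that need a larger pool can still raise it through the store's MaxConnections option, which NewSqliteStore continues to honour in the hunk above.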
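On the Postgres side, the lowered client default of 10 connections per database is meant to sit comfortably below the server's max_connections, as the sample-lnd.conf comment advises. The following rough standalone check is not part of the patch; the DSN and the 50% headroom heuristic are arbitrary placeholders.

package main

import (
	"database/sql"
	"log"
	"strconv"

	_ "github.com/jackc/pgx/v5/stdlib" // Registers the "pgx" database/sql driver.
)

func main() {
	// Placeholder DSN; substitute your own host, user and database.
	db, err := sql.Open("pgx", "postgres://lnd:lnd@localhost:5432/lnd")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ask the server for its global connection ceiling.
	var raw string
	if err := db.QueryRow("SHOW max_connections").Scan(&raw); err != nil {
		log.Fatal(err)
	}
	serverMax, err := strconv.Atoi(raw)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the client pool well below the server limit, in the spirit of
	// the new default of 10 connections per database.
	clientMax := 10
	if clientMax > serverMax/2 {
		clientMax = serverMax / 2
	}
	db.SetMaxOpenConns(clientMax)
	db.SetMaxIdleConns(clientMax)

	log.Printf("server max_connections=%d, client pool capped at %d",
		serverMax, clientMax)
}

If the default turns out too low for a given deployment, db.postgres.maxconnections in lnd.conf still overrides it, exactly as the sample-lnd.conf hunk shows.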
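The second patch avoids creating a fresh golang-migrate driver, and with it an extra pooled connection, for every migration step: the driver is built once and handed to a small executor. The sketch below shows the same reuse pattern against golang-migrate's public API only; the embedded migrations directory, the DSN and the database name are placeholders, and lnd's internal applyMigrations/newReplacerFS helpers are deliberately not reproduced.

package main

import (
	"database/sql"
	"embed"
	"errors"
	"log"

	"github.com/golang-migrate/migrate/v4"
	pgxmigrate "github.com/golang-migrate/migrate/v4/database/pgx/v5"
	"github.com/golang-migrate/migrate/v4/source/iofs"
	_ "github.com/jackc/pgx/v5/stdlib"
)

// The embedded migration directory is a placeholder for this sketch.
//
//go:embed migrations/*.sql
var migrationFS embed.FS

func main() {
	db, err := sql.Open("pgx", "postgres://lnd:lnd@localhost:5432/lnd")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create the migrate driver once. It holds a single connection from
	// the pool, so reusing it keeps the connection count flat no matter
	// how many migration operations follow.
	driver, err := pgxmigrate.WithInstance(db, &pgxmigrate.Config{})
	if err != nil {
		log.Fatal(err)
	}

	src, err := iofs.New(migrationFS, "migrations")
	if err != nil {
		log.Fatal(err)
	}

	m, err := migrate.NewWithInstance("iofs", src, "lnd", driver)
	if err != nil {
		log.Fatal(err)
	}

	if err := m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) {
		log.Fatal(err)
	}

	// The same driver can answer schema-version queries afterwards, which
	// is what the patch's executor types do for lnd.
	version, dirty, err := driver.Version()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("schema version=%d dirty=%v", version, dirty)

	// As in the patch, driver.Close() is deliberately not called: per the
	// patch's own note it would close the shared *sql.DB, so the
	// connection is released when the database handle itself is closed.
}

Skipping driver.Close() follows the patch's stated reasoning: the driver's Close would tear down the underlying *sql.DB, so the connection it holds is cleaned up when that handle is closed instead.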