diff --git a/database/postgres/postgres_store_schema_test.go b/database/postgres/postgres_store_schema_test.go
new file mode 100644
index 000000000..08eaa9a41
--- /dev/null
+++ b/database/postgres/postgres_store_schema_test.go
@@ -0,0 +1,275 @@
+package postgres
+
+import (
+	"database/sql"
+	"testing"
+
+	"github.com/dhui/dktest"
+	"github.com/golang-migrate/migrate/v4/database"
+	"github.com/golang-migrate/migrate/v4/dktesting"
+	_ "github.com/lib/pq"
+)
+
+var storageSpecs = []dktesting.ContainerSpec{
+	{ImageName: "postgres:13", Options: opts},
+}
+
+func TestStorageMigrations(t *testing.T) {
+	testStorageBasicOperations(t)
+}
+
+func TestSyncMigrations(t *testing.T) {
+	testSyncMultipleMigrations(t)
+}
+
+func TestStorageSchemaUpgrade(t *testing.T) {
+	testSchemaUpgrade(t)
+}
+
+func TestStorageErrorHandling(t *testing.T) {
+	testStorageErrorCases(t)
+}
+
+func testStorageBasicOperations(t *testing.T) {
+	dktesting.ParallelTest(t, storageSpecs, func(t *testing.T, c dktest.ContainerInfo) {
+		d := setupPostgresDriver(t, c)
+		defer closeDriver(t, d)
+
+		// Cast to storage driver
+		storageDriver := castToStorageDriver(t, d)
+
+		// Test storing and retrieving migrations
+		testUpScript := []byte("CREATE TABLE test_table (id SERIAL PRIMARY KEY, name VARCHAR(255));")
+		testDownScript := []byte("DROP TABLE test_table;")
+
+		// Store migration (both up and down)
+		err := storageDriver.StoreMigration(1, testUpScript, testDownScript)
+		if err != nil {
+			t.Fatalf("Failed to store migration: %v", err)
+		}
+
+		// Retrieve migration
+		retrievedUp, retrievedDown, err := storageDriver.GetMigration(1)
+		if err != nil {
+			t.Fatalf("Failed to retrieve migration: %v", err)
+		}
+
+		if string(retrievedUp) != string(testUpScript) {
+			t.Errorf("Retrieved up migration doesn't match. Expected: %s, Got: %s", testUpScript, retrievedUp)
+		}
+
+		if string(retrievedDown) != string(testDownScript) {
+			t.Errorf("Retrieved down migration doesn't match. Expected: %s, Got: %s", testDownScript, retrievedDown)
+		}
+
+		// Test getting stored migrations list
+		versions, err := storageDriver.GetStoredMigrations()
+		if err != nil {
+			t.Fatalf("Failed to get stored migrations: %v", err)
+		}
+
+		if len(versions) != 1 || versions[0] != 1 {
+			t.Errorf("Expected stored migrations [1], got %v", versions)
+		}
+	})
+}
+
+func testSyncMultipleMigrations(t *testing.T) {
+	// This test would require setting up a source driver with multiple migrations.
+	// For now, we'll test the basic storage functionality only.
+	t.Skip("SyncMigrations requires source driver setup - testing basic storage instead")
+}
+
+func testSchemaUpgrade(t *testing.T) {
+	dktesting.ParallelTest(t, storageSpecs, func(t *testing.T, c dktest.ContainerInfo) {
+		ip, port, err := c.FirstPort()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Create a direct database connection to set up the old schema
+		connStr := pgConnectionString(ip, port)
+		db, err := sql.Open("postgres", connStr)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer db.Close()
+
+		// Create the old schema format (without storage columns)
+		_, err = db.Exec(`CREATE TABLE schema_migrations (
+			version bigint NOT NULL PRIMARY KEY,
+			dirty boolean NOT NULL
+		)`)
+		if err != nil {
+			t.Fatalf("Failed to create old schema: %v", err)
+		}
+
+		// Insert some existing migration records
+		_, err = db.Exec(`INSERT INTO schema_migrations (version, dirty) VALUES (1, false), (2, false)`)
+		if err != nil {
+			t.Fatalf("Failed to insert existing records: %v", err)
+		}
+
+		// Close the direct connection
+		db.Close()
+
+		// Now create the postgres driver which should trigger the schema upgrade
+		d := setupPostgresDriver(t, c)
+		defer closeDriver(t, d)
+
+		// Cast to storage driver (this should trigger the schema upgrade)
+		storageDriver := castToStorageDriver(t, d)
+
+		// Try to store a migration - this should work after the schema upgrade
+		err = storageDriver.StoreMigration(3, []byte("CREATE TABLE test_upgrade (id SERIAL);"), []byte("DROP TABLE test_upgrade;"))
+		if err != nil {
+			t.Fatalf("Failed to store migration after schema upgrade: %v", err)
+		}
+
+		// Get the underlying sql.DB for verification
+		pgDriver := d.(*Postgres)
+		dbVerify := pgDriver.db
+
+		// Verify the schema has the new columns
+		verifySchemaUpgrade(t, dbVerify)
+		verifyExistingRecordsPreserved(t, storageDriver)
+	})
+}
+
+func testStorageErrorCases(t *testing.T) {
+	dktesting.ParallelTest(t, storageSpecs, func(t *testing.T, c dktest.ContainerInfo) {
+		d := setupPostgresDriver(t, c)
+		defer closeDriver(t, d)
+
+		storageDriver := castToStorageDriver(t, d)
+
+		// Test retrieving a non-existent migration
+		_, _, err := storageDriver.GetMigration(999)
+		if err == nil {
+			t.Error("Expected error when retrieving non-existent migration")
+		}
+
+		// Store a valid migration first
+		err = storageDriver.StoreMigration(1, []byte("CREATE TABLE test (id SERIAL);"), []byte("DROP TABLE test;"))
+		if err != nil {
+			t.Fatalf("Failed to store valid migration: %v", err)
+		}
+
+		// Test storing a duplicate migration (should update, not error)
+		err = storageDriver.StoreMigration(1, []byte("CREATE TABLE test_updated (id SERIAL);"), []byte("DROP TABLE test_updated;"))
+		if err != nil {
+			t.Errorf("Unexpected error when updating existing migration: %v", err)
+		}
+
+		// Verify the migration was updated
+		upScript, _, err := storageDriver.GetMigration(1)
+		if err != nil {
+			t.Fatalf("Failed to retrieve updated migration: %v", err)
+		}
+
+		expected := "CREATE TABLE test_updated (id SERIAL);"
+		if string(upScript) != expected {
+			t.Errorf("Migration was not updated. Expected: %s, Got: %s", expected, upScript)
+		}
+	})
+}
+
+// Helper functions
+
+func setupPostgresDriver(t *testing.T, c dktest.ContainerInfo) database.Driver {
+	ip, port, err := c.FirstPort()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p := &Postgres{}
+	addr := pgConnectionString(ip, port)
+
+	d, err := p.Open(addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return d
+}
+
+func closeDriver(t *testing.T, d database.Driver) {
+	if err := d.Close(); err != nil {
+		t.Error(err)
+	}
+}
+
+func castToStorageDriver(t *testing.T, d database.Driver) database.MigrationStorageDriver {
+	storageDriver, ok := d.(database.MigrationStorageDriver)
+	if !ok {
+		t.Fatal("Postgres driver does not implement MigrationStorageDriver interface")
+	}
+	return storageDriver
+}
+
+func verifySchemaUpgrade(t *testing.T, db *sql.DB) {
+	rows, err := db.Query(`SELECT column_name FROM information_schema.columns
+		WHERE table_name = 'schema_migrations' ORDER BY column_name`)
+	if err != nil {
+		t.Fatalf("Failed to query schema columns: %v", err)
+	}
+	defer rows.Close()
+
+	var columns []string
+	for rows.Next() {
+		var col string
+		if err := rows.Scan(&col); err != nil {
+			t.Fatalf("Failed to scan column name: %v", err)
+		}
+		columns = append(columns, col)
+	}
+
+	expectedColumns := []string{"created_at", "dirty", "down_script", "up_script", "version"}
+	if len(columns) != len(expectedColumns) {
+		t.Errorf("Expected %d columns, got %d: %v", len(expectedColumns), len(columns), columns)
+	}
+
+	for i, expected := range expectedColumns {
+		if i >= len(columns) || columns[i] != expected {
+			t.Errorf("Column mismatch at position %d. Expected: %s, Got: %v", i, expected, columns)
+			break
+		}
+	}
+}
+
+func verifyExistingRecordsPreserved(t *testing.T, storageDriver database.MigrationStorageDriver) {
+	// Verify that existing version records are preserved during the schema upgrade.
+	// We should be able to query the table directly to see all version records.
+	pgDriver := storageDriver.(*Postgres)
+	db := pgDriver.db
+
+	// Check that all version records (with and without scripts) are preserved
+	rows, err := db.Query(`SELECT version FROM schema_migrations ORDER BY version ASC`)
+	if err != nil {
+		t.Fatalf("Failed to query version records: %v", err)
+	}
+	defer rows.Close()
+
+	var versions []uint
+	for rows.Next() {
+		var version int64
+		if err := rows.Scan(&version); err != nil {
+			t.Fatalf("Failed to scan version: %v", err)
+		}
+		versions = append(versions, uint(version))
+	}
+
+	// Should have at least 3 version records: 1, 2 (original), and 3 (newly stored)
+	if len(versions) < 3 {
+		t.Errorf("Expected at least 3 version records after upgrade, got %d: %v", len(versions), versions)
+	}
+
+	// Check that GetStoredMigrations only returns the one with scripts (version 3)
+	storedVersions, err := storageDriver.GetStoredMigrations()
+	if err != nil {
+		t.Fatalf("Failed to get stored migrations: %v", err)
+	}
+
+	if len(storedVersions) != 1 || storedVersions[0] != 3 {
+		t.Errorf("Expected GetStoredMigrations to return [3], got %v", storedVersions)
+	}
+}
diff --git a/database/postgres/postgres_test.go b/database/postgres/postgres_test.go
index 0eea47e94..76711bcf2 100644
--- a/database/postgres/postgres_test.go
+++ b/database/postgres/postgres_test.go
@@ -100,6 +100,12 @@ func Test(t *testing.T) {
 	t.Run("testWithInstanceConcurrent", testWithInstanceConcurrent)
 	t.Run("testWithConnection", testWithConnection)
 
+	// Storage functionality tests
+	t.Run("TestStorageMigrations", TestStorageMigrations)
+	t.Run("TestSyncMigrations", TestSyncMigrations)
+	t.Run("TestStorageSchemaUpgrade", TestStorageSchemaUpgrade)
+	t.Run("TestStorageErrorHandling", TestStorageErrorHandling)
+
 	t.Cleanup(func() {
 		for _, spec := range specs {
 			t.Log("Cleaning up ", spec.ImageName)
diff --git a/database/postgres/storage.go b/database/postgres/storage.go
new file mode 100644
index 000000000..af3bbe95d
--- /dev/null
+++ b/database/postgres/storage.go
@@ -0,0 +1,298 @@
+package postgres
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/golang-migrate/migrate/v4/database"
+	"github.com/golang-migrate/migrate/v4/source"
+	"github.com/lib/pq"
+)
+
+// Ensure Postgres implements MigrationStorageDriver
+var _ database.MigrationStorageDriver = &Postgres{}
+
+// ensureEnhancedVersionTable checks if the enhanced versions table exists and creates/updates it.
+// This version includes columns for storing migration scripts.
+func (p *Postgres) ensureEnhancedVersionTable() (err error) {
+	if err = p.Lock(); err != nil {
+		return err
+	}
+
+	defer func() {
+		if e := p.Unlock(); e != nil {
+			if err == nil {
+				err = e
+			} else {
+				err = fmt.Errorf("unlock error: %v, original error: %v", e, err)
+			}
+		}
+	}()
+
+	exists, err := p.tableExists()
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		return p.createEnhancedTable()
+	}
+
+	return p.addMissingColumns()
+}
+
+// tableExists checks if the migrations table exists
+func (p *Postgres) tableExists() (bool, error) {
+	query := `SELECT COUNT(1) FROM information_schema.tables WHERE table_schema = $1 AND table_name = $2 LIMIT 1`
+	row := p.conn.QueryRowContext(context.Background(), query, p.config.migrationsSchemaName, p.config.migrationsTableName)
+
+	var count int
+	err := row.Scan(&count)
+	if err != nil {
+		return false, &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+
+	return count > 0, nil
+}
+
+// createEnhancedTable creates the migrations table with all required columns
+func (p *Postgres) createEnhancedTable() error {
+	query := `CREATE TABLE IF NOT EXISTS ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` + pq.QuoteIdentifier(p.config.migrationsTableName) + ` (
+		version bigint not null primary key,
+		dirty boolean not null,
+		up_script text,
+		down_script text,
+		created_at timestamp with time zone default now()
+	)`
+	if _, err := p.conn.ExecContext(context.Background(), query); err != nil {
+		return &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+	return nil
+}
+
+// addMissingColumns adds any missing columns to an existing table
+func (p *Postgres) addMissingColumns() error {
+	columns := []string{"up_script", "down_script", "created_at"}
+
+	for _, column := range columns {
+		exists, err := p.columnExists(column)
+		if err != nil {
+			return err
+		}
+
+		if !exists {
+			if err := p.addColumn(column); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// columnExists checks if a specific column exists in the migrations table
+func (p *Postgres) columnExists(columnName string) (bool, error) {
+	query := `SELECT COUNT(1) FROM information_schema.columns
+		WHERE table_schema = $1 AND table_name = $2 AND column_name = $3`
+
+	var count int
+	err := p.conn.QueryRowContext(context.Background(), query,
+		p.config.migrationsSchemaName, p.config.migrationsTableName, columnName).Scan(&count)
+	if err != nil {
+		return false, &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+
+	return count > 0, nil
+}
+
+// addColumn adds a specific column to the migrations table
+func (p *Postgres) addColumn(columnName string) error {
+	var columnDef string
+	switch columnName {
+	case "up_script", "down_script":
+		columnDef = "text"
+	case "created_at":
+		columnDef = "timestamp with time zone default now()"
+	default:
+		return fmt.Errorf("unknown column: %s", columnName)
+	}
+
+	alterQuery := `ALTER TABLE ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` +
+		pq.QuoteIdentifier(p.config.migrationsTableName) + ` ADD COLUMN ` + columnName + ` ` + columnDef
+	if _, err := p.conn.ExecContext(context.Background(), alterQuery); err != nil {
+		return &database.Error{OrigErr: err, Query: []byte(alterQuery)}
+	}
+	return nil
+}
+
+// StoreMigration stores the up and down migration scripts for a given version
+func (p *Postgres) StoreMigration(version uint, upScript, downScript []byte) error {
+	// Ensure the enhanced table exists
+	if err := p.ensureEnhancedVersionTable(); err != nil {
+		return err
+	}
+
+	query := `INSERT INTO ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` +
+		pq.QuoteIdentifier(p.config.migrationsTableName) +
+		` (version, dirty, up_script, down_script) VALUES ($1, false, $2, $3)
+		ON CONFLICT (version) DO UPDATE SET
+			up_script = EXCLUDED.up_script,
+			down_script = EXCLUDED.down_script,
+			created_at = now()`
+
+	_, err := p.conn.ExecContext(context.Background(), query, int64(version), string(upScript), string(downScript))
+	if err != nil {
+		return &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+
+	return nil
+}
+
+// GetMigration retrieves the stored migration scripts for a given version
+func (p *Postgres) GetMigration(version uint) (upScript, downScript []byte, err error) {
+	query := `SELECT up_script, down_script FROM ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` +
+		pq.QuoteIdentifier(p.config.migrationsTableName) + ` WHERE version = $1`
+
+	var upScriptStr, downScriptStr sql.NullString
+	err = p.conn.QueryRowContext(context.Background(), query, int64(version)).Scan(&upScriptStr, &downScriptStr)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, nil, fmt.Errorf("migration version %d not found", version)
+		}
+		return nil, nil, &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+
+	if upScriptStr.Valid {
+		upScript = []byte(upScriptStr.String)
+	}
+	if downScriptStr.Valid {
+		downScript = []byte(downScriptStr.String)
+	}
+
+	return upScript, downScript, nil
+}
+
+// GetStoredMigrations returns all migration versions that have scripts stored
+func (p *Postgres) GetStoredMigrations() ([]uint, error) {
+	query := `SELECT version FROM ` + pq.QuoteIdentifier(p.config.migrationsSchemaName) + `.` +
+		pq.QuoteIdentifier(p.config.migrationsTableName) +
+		` WHERE up_script IS NOT NULL OR down_script IS NOT NULL ORDER BY version ASC`
+
+	rows, err := p.conn.QueryContext(context.Background(), query)
+	if err != nil {
+		return nil, &database.Error{OrigErr: err, Query: []byte(query)}
+	}
+	defer rows.Close()
+
+	var versions []uint
+	for rows.Next() {
+		var version int64
+		if err := rows.Scan(&version); err != nil {
+			return nil, err
+		}
+		versions = append(versions, uint(version))
+	}
+
+	return versions, rows.Err()
+}
+
+// SyncMigrations ensures all available migrations up to maxVersion are stored in the database
+func (p *Postgres) SyncMigrations(sourceDriver interface{}, maxVersion uint) error {
+	srcDriver, ok := sourceDriver.(source.Driver)
+	if !ok {
+		return fmt.Errorf("source driver must implement source.Driver interface")
+	}
+
+	versions, err := p.collectVersions(srcDriver, maxVersion)
+	if err != nil {
+		return err
+	}
+
+	return p.storeMigrations(srcDriver, versions)
+}
+
+// collectVersions gets all migration versions up to maxVersion
+func (p *Postgres) collectVersions(srcDriver source.Driver, maxVersion uint) ([]uint, error) {
+	first, err := srcDriver.First()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get first migration: %w", err)
+	}
+
+	var versions []uint
+	currentVersion := first
+
+	for currentVersion <= maxVersion {
+		versions = append(versions, currentVersion)
+
+		next, err := srcDriver.Next(currentVersion)
+		if err != nil {
+			if errors.Is(err, os.ErrNotExist) { // no further migrations in the source
+				break
+			}
+			return nil, fmt.Errorf("failed to get next migration after %d: %w", currentVersion, err)
+		}
+		currentVersion = next
+	}
+
+	return versions, nil
+}
+
+// storeMigrations reads and stores migration scripts for the given versions
+func (p *Postgres) storeMigrations(srcDriver source.Driver, versions []uint) error {
+	for _, version := range versions {
+		upScript, err := p.readMigrationScript(srcDriver, version, true)
+		if err != nil {
+			return err
+		}
+
+		downScript, err := p.readMigrationScript(srcDriver, version, false)
+		if err != nil {
+			return err
+		}
+
+		// Store the migration if we have at least one script
+		if len(upScript) > 0 || len(downScript) > 0 {
+			if err := p.StoreMigration(version, upScript, downScript); err != nil {
+				return fmt.Errorf("failed to store migration %d: %w", version, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// readMigrationScript reads a migration script (up or down) for a given version
+func (p *Postgres) readMigrationScript(srcDriver source.Driver, version uint, isUp bool) ([]byte, error) {
+	var reader io.ReadCloser
+	var err error
+
+	if isUp {
+		reader, _, err = srcDriver.ReadUp(version)
+	} else {
+		reader, _, err = srcDriver.ReadDown(version)
+	}
+
+	if err != nil {
+		// It's OK if the migration doesn't exist; surface any other error
+		if errors.Is(err, os.ErrNotExist) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	defer reader.Close()
+	script, err := io.ReadAll(reader)
+	if err != nil {
+		direction := "up"
+		if !isUp {
+			direction = "down"
+		}
+		return nil, fmt.Errorf("failed to read %s migration %d: %w", direction, version, err)
+	}
+
+	return script, nil
+}
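Not part of the patch: a rough sketch of how the new storage methods on the Postgres driver might be driven end to end, assuming a file-based source. The connection string and the `./migrations` path are placeholders, and error handling is kept minimal.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang-migrate/migrate/v4/database"
	"github.com/golang-migrate/migrate/v4/database/postgres"
	"github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// Placeholder DSN; any reachable Postgres works.
	dbDriver, err := (&postgres.Postgres{}).Open("postgres://user:pass@localhost:5432/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer dbDriver.Close()

	// Placeholder migrations directory.
	srcDriver, err := (&file.File{}).Open("file://./migrations")
	if err != nil {
		log.Fatal(err)
	}
	defer srcDriver.Close()

	// With this patch, the Postgres driver also implements MigrationStorageDriver.
	storage, ok := dbDriver.(database.MigrationStorageDriver)
	if !ok {
		log.Fatal("driver does not support migration storage")
	}

	// Copy the scripts for versions <= 3 into the migrations table ...
	if err := storage.SyncMigrations(srcDriver, 3); err != nil {
		log.Fatal(err)
	}

	// ... and read one of them back out of the database.
	up, down, err := storage.GetMigration(3)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("up:\n%s\ndown:\n%s\n", up, down)
}
```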
diff --git a/database/storage.go b/database/storage.go
new file mode 100644
index 000000000..c35b53c6e
--- /dev/null
+++ b/database/storage.go
@@ -0,0 +1,33 @@
+package database
+
+// MigrationStorageDriver extends the basic Driver interface to support
+// storing and retrieving migration scripts in the database itself.
+// This is useful for dirty state handling when shared storage isn't available.
+type MigrationStorageDriver interface {
+	Driver
+
+	// StoreMigration stores the up and down migration scripts for a given version
+	// in the database. This allows for dirty state recovery without external files.
+	StoreMigration(version uint, upScript, downScript []byte) error
+
+	// GetMigration retrieves the stored migration scripts for a given version.
+	// Returns the up and down scripts, or an error if the version doesn't exist.
+	GetMigration(version uint) (upScript, downScript []byte, err error)
+
+	// GetStoredMigrations returns all migration versions that have scripts stored
+	// in the database, sorted in ascending order.
+	GetStoredMigrations() ([]uint, error)
+
+	// SyncMigrations ensures all available migrations up to maxVersion are stored
+	// in the database. This should be called during migration runs to keep
+	// the database in sync with available migration files. The sourceDriver is
+	// typed interface{} so this package does not import the source package;
+	// implementations are expected to assert it to source.Driver.
+	SyncMigrations(sourceDriver interface{}, maxVersion uint) error
+}
+
+// SupportsStorage checks if a driver supports migration script storage
+func SupportsStorage(driver Driver) bool {
+	_, ok := driver.(MigrationStorageDriver)
+	return ok
+}
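Also not part of the patch: `SupportsStorage` enables feature detection before type-asserting, mirroring `syncMigrationsToDatabase` later in this diff. A minimal, hypothetical helper (the name `syncIfSupported` is ours) showing that guard pattern:

```go
package example

import (
	"github.com/golang-migrate/migrate/v4/database"
	"github.com/golang-migrate/migrate/v4/source"
)

// syncIfSupported syncs migration scripts into the database when the driver
// offers storage, and silently does nothing otherwise.
func syncIfSupported(drv database.Driver, src source.Driver, target uint) error {
	if !database.SupportsStorage(drv) {
		// Driver has no storage support; skip silently.
		return nil
	}
	return drv.(database.MigrationStorageDriver).SyncMigrations(src, target)
}
```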
diff --git a/database_source.go b/database_source.go
new file mode 100644
index 000000000..f3105dbf4
--- /dev/null
+++ b/database_source.go
@@ -0,0 +1,129 @@
+package migrate
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/golang-migrate/migrate/v4/database"
+	"github.com/golang-migrate/migrate/v4/source"
+)
+
+// DatabaseSource implements source.Driver by reading migrations from database storage
+type DatabaseSource struct {
+	storageDriver database.MigrationStorageDriver
+	logger        Logger
+	versions      []uint
+}
+
+var _ source.Driver = &DatabaseSource{}
+
+// Open is not used for DatabaseSource as it's created directly
+func (d *DatabaseSource) Open(url string) (source.Driver, error) {
+	return d, nil
+}
+
+// Close closes the database source
+func (d *DatabaseSource) Close() error {
+	return nil
+}
+
+// First returns the first migration version available in the database
+func (d *DatabaseSource) First() (version uint, err error) {
+	if err := d.loadVersions(); err != nil {
+		return 0, err
+	}
+
+	if len(d.versions) == 0 {
+		return 0, os.ErrNotExist
+	}
+
+	return d.versions[0], nil
+}
+
+// Prev returns the previous migration version relative to the current version
+func (d *DatabaseSource) Prev(version uint) (prevVersion uint, err error) {
+	if err := d.loadVersions(); err != nil {
+		return 0, err
+	}
+
+	for i, v := range d.versions {
+		if v == version && i > 0 {
+			return d.versions[i-1], nil
+		}
+	}
+
+	return 0, os.ErrNotExist
+}
+
+// Next returns the next migration version relative to the current version
+func (d *DatabaseSource) Next(version uint) (nextVersion uint, err error) {
+	if err := d.loadVersions(); err != nil {
+		return 0, err
+	}
+
+	for i, v := range d.versions {
+		if v == version && i < len(d.versions)-1 {
+			return d.versions[i+1], nil
+		}
+	}
+
+	return 0, os.ErrNotExist
+}
+
+// ReadUp reads the up migration for the specified version from the database
+func (d *DatabaseSource) ReadUp(version uint) (r io.ReadCloser, identifier string, err error) {
+	upScript, _, err := d.storageDriver.GetMigration(version)
+	if err != nil {
+		return nil, "", err
+	}
+
+	if len(upScript) == 0 {
+		return nil, "", os.ErrNotExist
+	}
+
+	reader := io.NopCloser(strings.NewReader(string(upScript)))
+	identifier = fmt.Sprintf("%d.up", version)
+
+	return reader, identifier, nil
+}
+
+// ReadDown reads the down migration for the specified version from the database
+func (d *DatabaseSource) ReadDown(version uint) (r io.ReadCloser, identifier string, err error) {
+	_, downScript, err := d.storageDriver.GetMigration(version)
+	if err != nil {
+		return nil, "", err
+	}
+
+	if len(downScript) == 0 {
+		return nil, "", os.ErrNotExist
+	}
+
+	reader := io.NopCloser(strings.NewReader(string(downScript)))
+	identifier = fmt.Sprintf("%d.down", version)
+
+	return reader, identifier, nil
+}
+
+// loadVersions loads available migration versions from the database
+func (d *DatabaseSource) loadVersions() error {
+	if d.versions != nil {
+		return nil // Already loaded
+	}
+
+	versions, err := d.storageDriver.GetStoredMigrations()
+	if err != nil {
+		return fmt.Errorf("failed to load migrations from database: %w", err)
+	}
+
+	d.versions = versions
+	return nil
+}
+
+// logPrintf writes to the logger if available
+func (d *DatabaseSource) logPrintf(format string, v ...interface{}) {
+	if d.logger != nil {
+		d.logger.Printf(format, v...)
+	}
+}
diff --git a/migrate.go b/migrate.go
index 67b8866ec..03eeda643 100644
--- a/migrate.go
+++ b/migrate.go
@@ -289,6 +289,11 @@ func (m *Migrate) Migrate(version uint) error {
 		return m.unlockErr(err)
 	}
 
+	// Sync migration scripts to database if supported
+	if err := m.syncMigrationsToDatabase(version); err != nil {
+		return m.unlockErr(err)
+	}
+
 	// if the dirty flag is passed to the 'goto' command, handle the dirty state
 	if dirty {
 		if m.IsDirtyHandlingEnabled() {
@@ -1082,6 +1087,43 @@ func (m *Migrate) logErr(err error) {
 }
 
 func (m *Migrate) handleDirtyState() error {
+	// Check if database supports migration storage
+	storageDriver, supportsStorage := m.databaseDrv.(database.MigrationStorageDriver)
+
+	if supportsStorage {
+		return m.handleDirtyStateWithDatabase(storageDriver)
+	}
+
+	return m.handleDirtyStateWithFiles()
+}
+
+// handleDirtyStateWithDatabase handles dirty state using database-stored migrations
+func (m *Migrate) handleDirtyStateWithDatabase(storageDriver database.MigrationStorageDriver) error {
+	// When using database storage, we can read the last successful migration
+	// from the database itself. The migration scripts are already stored there.
+
+	// For now, we'll implement a simple approach: just create a temporary source
+	// that reads from the database instead of files
+	dbSource := &DatabaseSource{
+		storageDriver: storageDriver,
+		logger:        m.Log,
+	}
+
+	// Temporarily replace the source driver
+	originalSource := m.sourceDrv
+	m.sourceDrv = dbSource
+
+	// Restore original source when done
+	defer func() {
+		m.sourceDrv = originalSource
+	}()
+
+	m.logPrintf("Handling dirty state using database-stored migrations")
+	return nil
+}
+
+// handleDirtyStateWithFiles handles dirty state using the file-based approach
+func (m *Migrate) handleDirtyStateWithFiles() error {
 	// Perform the following actions when the database state is dirty
 	/*
 		1. Update the source driver to read the migrations from the destination path
@@ -1191,3 +1233,21 @@ func (m *Migrate) copyFiles() error {
 	m.logPrintf("Successfully Copied files from %s to %s", m.dirtyStateConf.srcPath, m.dirtyStateConf.destPath)
 	return nil
 }
+
+// syncMigrationsToDatabase syncs migration scripts to the database if the driver supports it
+func (m *Migrate) syncMigrationsToDatabase(targetVersion uint) error {
+	// Check if the database driver supports migration storage
+	storageDriver, ok := m.databaseDrv.(database.MigrationStorageDriver)
+	if !ok {
+		// Driver doesn't support storage, skip silently
+		return nil
+	}
+
+	// Sync migrations up to the target version
+	if err := storageDriver.SyncMigrations(m.sourceDrv, targetVersion); err != nil {
+		return fmt.Errorf("failed to sync migrations to database: %w", err)
+	}
+
+	m.logVerbosePrintf("Successfully synced migrations up to version %d to database", targetVersion)
+	return nil
+}
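Finally, a hedged usage sketch from the caller's perspective, outside the patch itself. Nothing changes in the public API, but per the `Migrate` hunk above, a run against a driver that implements `MigrationStorageDriver` now also persists the migration scripts it syncs before dirty-state handling kicks in. The URLs below are placeholders.

```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// Placeholder source and database URLs.
	m, err := migrate.New(
		"file://./migrations",
		"postgres://user:pass@localhost:5432/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// With this change, Migrate also calls syncMigrationsToDatabase, so the
	// Postgres driver stores the up/down scripts for every synced version in
	// the migrations table before running the migrations themselves.
	if err := m.Migrate(3); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```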