Skip to content

Commit 7066ee4

Browse files
committed
Remove incremental vacuum (replaced by weekly full VACUUM)
Incremental vacuum was running after every purge operation, but:
- It only reclaims pages at the END of the file.
- It doesn't help with fragmentation in the MIDDLE of the file.
- It was effectively doing nothing (pages_vacuumed=0).
- The weekly VACUUM INTO is much more effective.

Removed:
- The incrementalVacuum() function from database.go.
- The call to incrementalVacuum from processPurgeOperations.
- (Kept the database stats logging for monitoring.)

Result: cleaner code; the weekly VACUUM handles all fragmentation.
1 parent 20fbf62 commit 7066ee4

File tree

2 files changed

+13
-78
lines changed

2 files changed

+13
-78
lines changed

archive.go

Lines changed: 13 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -377,24 +377,18 @@ func (app app) purgeWorker(ctx context.Context) {
377377
func (app app) processPurgeOperations(ctx context.Context) error {
378378
logger := app.logger
379379

380-
// If no stories to purge, try to perform incremental vacuum
381-
// Use a reasonable number of pages (e.g., 1000) to avoid long operations
380+
// Log database stats for monitoring
381+
// Weekly VACUUM (Sunday 3 AM) handles fragmentation
382382
logger.Debug("Getting DB stats")
383383
size, freelist, fragmentation, err := app.ndb.getDatabaseStats()
384384
if err != nil {
385-
return errors.Wrap(err, "getDatabaseStats")
386-
}
387-
logger.Info("Database stats",
388-
"size_mb", float64(size)/(1024*1024),
389-
"freelist_pages", freelist,
390-
"fragmentation_pct", fragmentation)
391-
392-
const maxVacuumPages = 1000
393-
if err := app.ndb.incrementalVacuum(ctx, maxVacuumPages); err != nil {
394-
logger.Error("Failed to perform incremental vacuum", err)
395-
// Don't return the error - vacuuming is optional
385+
logger.Error("Failed to get database stats", err)
386+
} else {
387+
logger.Debug("Database stats",
388+
"size_mb", float64(size)/(1024*1024),
389+
"freelist_pages", freelist,
390+
"fragmentation_pct", fragmentation)
396391
}
397-
logger.Debug("Finished vacuum")
398392

399393
var purgedCount int
400394
var totalRowsPurged int64
@@ -517,7 +511,7 @@ func (app app) vacuumWorker(ctx context.Context) {
517511
select {
518512
case <-ticker.C:
519513
now := time.Now()
520-
514+
521515
// Only run on Sunday between 3 AM and 4 AM
522516
if now.Weekday() != time.Sunday {
523517
continue
@@ -572,12 +566,12 @@ func (app app) performWeeklyVacuum(ctx context.Context) error {
572566
// Create compacted copy using VACUUM INTO
573567
newDBPath := fmt.Sprintf("%s/frontpage_new.sqlite", ndb.sqliteDataDir)
574568
oldDBPath := fmt.Sprintf("%s/frontpage.sqlite", ndb.sqliteDataDir)
575-
backupPath := fmt.Sprintf("%s/frontpage_backup_%s.sqlite",
576-
ndb.sqliteDataDir,
569+
backupPath := fmt.Sprintf("%s/frontpage_backup_%s.sqlite",
570+
ndb.sqliteDataDir,
577571
time.Now().Format("2006_01_02"))
578572

579573
logger.Info("Creating compacted database copy", "target", newDBPath)
580-
574+
581575
// Use VACUUM INTO to create compacted copy (doesn't block reads)
582576
_, err := ndb.db.Exec(fmt.Sprintf("VACUUM INTO '%s'", newDBPath))
583577
if err != nil {
@@ -609,7 +603,7 @@ func (app app) performWeeklyVacuum(ctx context.Context) error {
609603

610604
// Reconnect to new database
611605
logger.Info("Reconnecting to compacted database")
612-
newDB, err := sql.Open("sqlite3_ext",
606+
newDB, err := sql.Open("sqlite3_ext",
613607
fmt.Sprintf("file:%s?_journal_mode=WAL&_busy_timeout=5000", oldDBPath))
614608
if err != nil {
615609
return errors.Wrap(err, "failed to reconnect to database")

database.go

Lines changed: 0 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -685,62 +685,3 @@ func (ndb newsDatabase) countStoriesNeedingPurge(ctx context.Context) (int, erro
685685

686686
return count, nil
687687
}
688-
689-
// incrementalVacuum performs a controlled vacuum operation during idle time
690-
// maxPages specifies the maximum number of pages to vacuum in this operation
691-
func (ndb newsDatabase) incrementalVacuum(ctx context.Context, maxPages int) error {
692-
// Get initial fragmentation stats
693-
var initialFragmentation float64
694-
var initialFreePages int64
695-
err := ndb.db.QueryRowContext(ctx, `
696-
SELECT
697-
(SELECT freelist_count FROM pragma_freelist_count()) as free_pages,
698-
ROUND(
699-
100.0 * (SELECT freelist_count FROM pragma_freelist_count()) /
700-
(SELECT page_count FROM pragma_page_count()), 1
701-
) as fragmentation_pct
702-
`).Scan(&initialFreePages, &initialFragmentation)
703-
if err != nil {
704-
return errors.Wrap(err, "checking initial fragmentation")
705-
}
706-
707-
if initialFreePages == 0 {
708-
return nil // Nothing to vacuum
709-
}
710-
711-
// Calculate how many pages to vacuum (min of free pages and maxPages)
712-
pagesToVacuum := initialFreePages
713-
if int64(maxPages) < pagesToVacuum {
714-
pagesToVacuum = int64(maxPages)
715-
}
716-
717-
// Perform the incremental vacuum
718-
_, err = ndb.db.ExecContext(ctx, fmt.Sprintf("PRAGMA incremental_vacuum(%d)", pagesToVacuum))
719-
if err != nil {
720-
return errors.Wrap(err, "incremental vacuum")
721-
}
722-
723-
// Get final fragmentation stats
724-
var finalFragmentation float64
725-
var finalFreePages int64
726-
err = ndb.db.QueryRowContext(ctx, `
727-
SELECT
728-
(SELECT freelist_count FROM pragma_freelist_count()) as free_pages,
729-
ROUND(
730-
100.0 * (SELECT freelist_count FROM pragma_freelist_count()) /
731-
(SELECT page_count FROM pragma_page_count()), 1
732-
) as fragmentation_pct
733-
`).Scan(&finalFreePages, &finalFragmentation)
734-
if err != nil {
735-
return errors.Wrap(err, "checking final fragmentation")
736-
}
737-
738-
// Log the results
739-
slog.Info("Incremental vacuum completed",
740-
"pages_vacuumed", initialFreePages-finalFreePages,
741-
"initial_fragmentation_pct", initialFragmentation,
742-
"final_fragmentation_pct", finalFragmentation,
743-
"remaining_free_pages", finalFreePages)
744-
745-
return nil
746-
}

0 commit comments

Comments (0)