diff --git a/cmd/spicedb/main.go b/cmd/spicedb/main.go index 7696fd5be..44fc77080 100644 --- a/cmd/spicedb/main.go +++ b/cmd/spicedb/main.go @@ -2,6 +2,7 @@ package main import ( "errors" + "fmt" "os" "github.com/rs/zerolog" @@ -18,11 +19,20 @@ import ( ) func main() { + os.Exit(run()) +} + +func run() int { memoryprotection.InitDefaultMemoryUsageProvider() // Set up root logger // This will typically be overwritten by the logging setup for a given command. log.SetGlobalLogger(zerolog.New(os.Stderr).Level(zerolog.InfoLevel)) + defer func() { + if err := log.Close(); err != nil { + fmt.Fprintf(os.Stderr, "warning: failed to flush logs: %v\n", err) + } + }() // Enable Kubernetes gRPC resolver kuberesolver.RegisterInCluster() @@ -33,7 +43,8 @@ func main() { // Build the complete command structure rootCmd, err := cmd.BuildRootCommand() if err != nil { - log.Fatal().Err(err).Msg("failed to build root command") + log.Error().Err(err).Msg("failed to build root command") + return 1 } if err := rootCmd.Execute(); err != nil { @@ -42,8 +53,10 @@ func main() { } var termErr spiceerrors.TerminationError if errors.As(err, &termErr) { - os.Exit(termErr.ExitCode()) + return termErr.ExitCode() } - os.Exit(1) + return 1 } + + return 0 } diff --git a/internal/logging/logger.go b/internal/logging/logger.go index 8204af9a1..600028fac 100644 --- a/internal/logging/logger.go +++ b/internal/logging/logger.go @@ -2,14 +2,129 @@ package logging import ( "context" + "fmt" + "io" + "os" + "sync" + "time" "github.com/go-logr/zerologr" "github.com/rs/zerolog" logf "sigs.k8s.io/controller-runtime/pkg/log" ) +// defaultCloseTimeout is the maximum time to wait for log flushing during shutdown. +// If the underlying writer blocks longer than this, Close() will return with an error +// rather than hanging indefinitely. +const defaultCloseTimeout = 5 * time.Second + var Logger zerolog.Logger +type closerHolder struct { + mu sync.Mutex + closer io.Closer // GUARDED_BY(mu) + // closed is permanent once set to true. This prevents double-close bugs where + // multiple shutdown paths might try to close the same closer. Once closed, + // any new closer passed to Set() is immediately closed to prevent resource leaks. + closed bool // GUARDED_BY(mu) +} + +func (ch *closerHolder) Set(closer io.Closer) { + var toClose io.Closer + + ch.mu.Lock() + if ch.closed { + toClose = closer + ch.mu.Unlock() + if toClose != nil { + _ = toClose.Close() + } + return + } + + // Don't close if the same closer is being set again + if ch.closer != closer { + toClose = ch.closer + } + ch.closer = closer + ch.mu.Unlock() + + if toClose != nil { + _ = toClose.Close() + } +} + +func (ch *closerHolder) Close() error { + return ch.CloseWithTimeout(defaultCloseTimeout) +} + +// CloseWithTimeout closes the held closer with a timeout to prevent shutdown hangs. +// If the closer doesn't complete within the timeout, an error is returned but the +// close operation may still complete in the background. +// +// NOTE: On timeout, the goroutine performing the close continues running until the +// underlying Close() completes. This is intentional for shutdown scenarios where we +// prefer to return promptly rather than block indefinitely. The goroutine will not +// leak permanently—it will terminate when Close() eventually returns (or when the +// process exits). 
+func (ch *closerHolder) CloseWithTimeout(timeout time.Duration) error { + var toClose io.Closer + + ch.mu.Lock() + if ch.closed { + ch.mu.Unlock() + return nil + } + + ch.closed = true + toClose = ch.closer + ch.closer = nil + ch.mu.Unlock() + + if toClose == nil { + return nil + } + + // For zero or negative timeout, close synchronously + if timeout <= 0 { + err := toClose.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "warning: failed to flush logs: %v\n", err) + } + return err + } + + // Use a channel to wait for close with timeout + done := make(chan error, 1) + go func() { + done <- toClose.Close() + }() + + select { + case err := <-done: + if err != nil { + // Write directly to stderr since the logger may not be functional + fmt.Fprintf(os.Stderr, "warning: failed to flush logs: %v\n", err) + } + return err + case <-time.After(timeout): + err := fmt.Errorf("timeout after %v waiting for log flush", timeout) + fmt.Fprintf(os.Stderr, "warning: %v\n", err) + return err + } +} + +// reset clears the closer state without closing the current closer. +// This is only for test use to restore state between test cases. +func (ch *closerHolder) reset() { + ch.mu.Lock() + ch.closer = nil + ch.closed = false + ch.mu.Unlock() +} + +var globalCloser closerHolder + func init() { SetGlobalLogger(zerolog.Nop()) logf.SetLogger(zerologr.New(&Logger)) @@ -20,6 +135,34 @@ func SetGlobalLogger(logger zerolog.Logger) { zerolog.DefaultContextLogger = &Logger } +func SetGlobalLoggerWithCloser(logger zerolog.Logger, closer io.Closer) { + // Store the closer BEFORE activating the logger to avoid a race window where + // the logger is active but Close() wouldn't flush it (if called between the two operations). + globalCloser.Set(closer) + SetGlobalLogger(logger) +} + +// Close flushes and releases resources owned by the globally configured logger. +// It is safe to call multiple times. +func Close() error { return globalCloser.Close() } + +// ResetCloserForTesting resets the global closer state. +// +// WARNING: This is for TEST USE ONLY. Production code must NEVER call this function. +// Calling this in production will silently discard any pending log closer, potentially +// losing buffered log messages. +// +// This function exists because Close() is designed to be idempotent and final (sets +// closed=true permanently), but tests need to reset state between test cases to ensure +// isolation. Without this, a test that calls Close() would cause subsequent tests' +// SetGlobalLoggerWithCloser() calls to immediately close their new closers. +// +// Go's package visibility constraints prevent test files in other packages from +// accessing the unexported globalCloser directly, necessitating this exported helper. 
+func ResetCloserForTesting() { + globalCloser.reset() +} + func With() zerolog.Context { return Logger.With() } func Err(err error) *zerolog.Event { return Logger.Err(err) } diff --git a/internal/logging/logger_test.go b/internal/logging/logger_test.go new file mode 100644 index 000000000..3eafb9005 --- /dev/null +++ b/internal/logging/logger_test.go @@ -0,0 +1,344 @@ +package logging + +import ( + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" +) + +// mockCloser is a test double that tracks Close() calls +type mockCloser struct { + closeCount atomic.Int32 + closeErr error + closeDelay time.Duration // Optional delay to simulate slow close +} + +func (m *mockCloser) Close() error { + if m.closeDelay > 0 { + time.Sleep(m.closeDelay) + } + m.closeCount.Add(1) + return m.closeErr +} + +func TestCloserHolder_Set(t *testing.T) { + t.Run("sets closer", func(t *testing.T) { + ch := &closerHolder{} + closer := &mockCloser{} + + ch.Set(closer) + + require.NoError(t, ch.Close()) + require.Equal(t, int32(1), closer.closeCount.Load()) + }) + + t.Run("closes previous closer when setting new one", func(t *testing.T) { + ch := &closerHolder{} + first := &mockCloser{} + second := &mockCloser{} + + ch.Set(first) + ch.Set(second) + + // First should have been closed when second was set + require.Equal(t, int32(1), first.closeCount.Load()) + // Second should not be closed yet + require.Equal(t, int32(0), second.closeCount.Load()) + + // Now close the holder + require.NoError(t, ch.Close()) + require.Equal(t, int32(1), second.closeCount.Load()) + }) + + t.Run("does not double-close same closer", func(t *testing.T) { + ch := &closerHolder{} + closer := &mockCloser{} + + ch.Set(closer) + ch.Set(closer) // Same closer again + + // Should not have been closed yet (same instance) + require.Equal(t, int32(0), closer.closeCount.Load()) + + require.NoError(t, ch.Close()) + require.Equal(t, int32(1), closer.closeCount.Load()) + }) + + t.Run("closes new closer immediately if holder already closed", func(t *testing.T) { + ch := &closerHolder{} + first := &mockCloser{} + second := &mockCloser{} + + ch.Set(first) + require.NoError(t, ch.Close()) + + // Setting after close should immediately close the new closer + ch.Set(second) + require.Equal(t, int32(1), second.closeCount.Load()) + }) + + t.Run("handles nil closer", func(t *testing.T) { + ch := &closerHolder{} + + ch.Set(nil) + require.NoError(t, ch.Close()) + }) +} + +func TestCloserHolder_Close(t *testing.T) { + t.Run("is idempotent", func(t *testing.T) { + ch := &closerHolder{} + closer := &mockCloser{} + + ch.Set(closer) + + require.NoError(t, ch.Close()) + require.NoError(t, ch.Close()) + require.NoError(t, ch.Close()) + + // Should only be closed once + require.Equal(t, int32(1), closer.closeCount.Load()) + }) + + t.Run("returns error from closer", func(t *testing.T) { + ch := &closerHolder{} + expectedErr := errors.New("close failed") + closer := &mockCloser{closeErr: expectedErr} + + ch.Set(closer) + + err := ch.Close() + require.ErrorIs(t, err, expectedErr) + }) + + t.Run("returns nil when no closer set", func(t *testing.T) { + ch := &closerHolder{} + require.NoError(t, ch.Close()) + }) + + t.Run("times out if closer blocks too long", func(t *testing.T) { + ch := &closerHolder{} + // Create a closer that takes longer than our timeout + closer := &mockCloser{closeDelay: 500 * time.Millisecond} + + ch.Set(closer) + + // Use a very short timeout to trigger the timeout path + err := 
ch.CloseWithTimeout(10 * time.Millisecond) + require.Error(t, err) + require.Contains(t, err.Error(), "timeout") + }) + + t.Run("completes before timeout if closer is fast", func(t *testing.T) { + ch := &closerHolder{} + closer := &mockCloser{} // No delay + + ch.Set(closer) + + err := ch.CloseWithTimeout(1 * time.Second) + require.NoError(t, err) + require.Equal(t, int32(1), closer.closeCount.Load()) + }) + + t.Run("zero timeout closes synchronously", func(t *testing.T) { + ch := &closerHolder{} + closer := &mockCloser{} + + ch.Set(closer) + + err := ch.CloseWithTimeout(0) + require.NoError(t, err) + require.Equal(t, int32(1), closer.closeCount.Load()) + }) + + t.Run("negative timeout closes synchronously", func(t *testing.T) { + ch := &closerHolder{} + closer := &mockCloser{} + + ch.Set(closer) + + err := ch.CloseWithTimeout(-1 * time.Second) + require.NoError(t, err) + require.Equal(t, int32(1), closer.closeCount.Load()) + }) +} + +func TestCloserHolder_Concurrent(t *testing.T) { + t.Run("concurrent Set calls are safe", func(t *testing.T) { + ch := &closerHolder{} + var wg sync.WaitGroup + closers := make([]*mockCloser, 100) + + for i := range closers { + closers[i] = &mockCloser{} + } + + // Concurrently set different closers + for i := 0; i < 100; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + ch.Set(closers[idx]) + }(i) + } + + wg.Wait() + require.NoError(t, ch.Close()) + + // Count total closes - should be 100 (99 replaced + 1 final close) + var totalCloses int32 + for _, c := range closers { + totalCloses += c.closeCount.Load() + } + require.Equal(t, int32(100), totalCloses) + }) + + t.Run("concurrent Close calls are safe", func(t *testing.T) { + ch := &closerHolder{} + closer := &mockCloser{} + ch.Set(closer) + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _ = ch.Close() + }() + } + + wg.Wait() + + // Should only be closed once despite concurrent calls + require.Equal(t, int32(1), closer.closeCount.Load()) + }) +} + +func TestSetGlobalLoggerWithCloser(t *testing.T) { + // Save original state + originalLogger := Logger + + t.Cleanup(func() { + SetGlobalLogger(originalLogger) + // Reset global closer using thread-safe method + globalCloser.reset() + }) + + t.Run("sets logger and closer", func(t *testing.T) { + logger := zerolog.Nop() + closer := &mockCloser{} + + SetGlobalLoggerWithCloser(logger, closer) + + require.NoError(t, Close()) + require.Equal(t, int32(1), closer.closeCount.Load()) + }) +} + +func TestClose(t *testing.T) { + // Save original state + originalLogger := Logger + + t.Cleanup(func() { + SetGlobalLogger(originalLogger) + // Reset global closer using thread-safe method + globalCloser.reset() + }) + + t.Run("closes global closer", func(t *testing.T) { + closer := &mockCloser{} + SetGlobalLoggerWithCloser(zerolog.Nop(), closer) + + require.NoError(t, Close()) + require.Equal(t, int32(1), closer.closeCount.Load()) + }) + + t.Run("is safe to call without closer", func(t *testing.T) { + // Reset to ensure no closer using thread-safe method + globalCloser.reset() + require.NoError(t, Close()) + }) +} + +func TestResetCloserForTesting(t *testing.T) { + // Save original state + originalLogger := Logger + + t.Cleanup(func() { + SetGlobalLogger(originalLogger) + globalCloser.reset() + }) + + t.Run("resets closed flag allowing new closers to be set", func(t *testing.T) { + // Start fresh using thread-safe method + globalCloser.reset() + + // Set and close a closer + first := &mockCloser{} + 
SetGlobalLoggerWithCloser(zerolog.Nop(), first) + require.NoError(t, Close()) + require.Equal(t, int32(1), first.closeCount.Load()) + + // Without reset, new closers would be immediately closed + // (because closed=true permanently after Close()) + ResetCloserForTesting() + + // After reset, should accept new closers normally + second := &mockCloser{} + SetGlobalLoggerWithCloser(zerolog.Nop(), second) + + // Second closer should NOT be closed yet (reset cleared closed flag) + require.Equal(t, int32(0), second.closeCount.Load()) + + // Now close should work on the new closer + require.NoError(t, Close()) + require.Equal(t, int32(1), second.closeCount.Load()) + }) + + t.Run("discards pending closer without closing it", func(t *testing.T) { + // Start fresh to ensure isolation from previous test + globalCloser.reset() + + closer := &mockCloser{} + SetGlobalLoggerWithCloser(zerolog.Nop(), closer) + + // Reset discards the closer without calling Close() + ResetCloserForTesting() + + // The discarded closer was never closed (this is the dangerous behavior + // that makes this function unsafe for production use) + require.Equal(t, int32(0), closer.closeCount.Load()) + + // Close() now does nothing since state was reset + require.NoError(t, Close()) + require.Equal(t, int32(0), closer.closeCount.Load()) + }) + + t.Run("concurrent reset and close are safe", func(t *testing.T) { + // Test only the closerHolder operations, not the Logger global + // (Logger global has a separate pre-existing race condition) + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(3) + go func() { + defer wg.Done() + globalCloser.reset() + }() + go func() { + defer wg.Done() + _ = globalCloser.Close() + }() + go func() { + defer wg.Done() + globalCloser.Set(&mockCloser{}) + }() + } + wg.Wait() + // Test passes if no race detected (run with -race flag) + }) +} diff --git a/pkg/cmd/root.go b/pkg/cmd/root.go index 882dd2f69..bd25092d7 100644 --- a/pkg/cmd/root.go +++ b/pkg/cmd/root.go @@ -20,6 +20,10 @@ import ( var ErrParsing = errors.New("parsing error") func RegisterRootFlags(cmd *cobra.Command) error { + // NOTE: We use cobrazerolog only for flag registration (--log-level, --log-format) + // and shell completion. The actual logger setup is done by server.ConfigureLoggingRunE() + // which wraps zerolog with a diode.Writer for async logging and properly exposes + // the closer so logs are flushed on shutdown. See PR #889 for context. 
zl := cobrazerolog.New() zl.RegisterFlags(cmd.PersistentFlags()) if err := zl.RegisterFlagCompletion(cmd); err != nil { diff --git a/pkg/cmd/server/defaults.go b/pkg/cmd/server/defaults.go index 3d3ef0a37..2e08b76d6 100644 --- a/pkg/cmd/server/defaults.go +++ b/pkg/cmd/server/defaults.go @@ -21,7 +21,6 @@ import ( "github.com/jzelinskie/cobrautil/v2" "github.com/jzelinskie/cobrautil/v2/cobraotel" "github.com/jzelinskie/cobrautil/v2/cobraproclimits" - "github.com/jzelinskie/cobrautil/v2/cobrazerolog" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/zerolog" @@ -71,11 +70,7 @@ func ServeExample(programName string) string { func DefaultPreRunE(programName string) cobrautil.CobraRunFunc { return cobrautil.CommandStack( cobrautil.SyncViperDotEnvPreRunE(programName, "spicedb.env", zerologr.New(&logging.Logger)), - cobrazerolog.New( - cobrazerolog.WithTarget(func(logger zerolog.Logger) { - logging.SetGlobalLogger(logger) - }), - ).RunE(), + ConfigureLoggingRunE(), // NOTE: These need to be declared after the logger to access // the logging context. // NOTE: We've observed OOMKill when setting to 1.0 under heavy-load, diff --git a/pkg/cmd/server/logging_run_e.go b/pkg/cmd/server/logging_run_e.go new file mode 100644 index 000000000..f6d3df257 --- /dev/null +++ b/pkg/cmd/server/logging_run_e.go @@ -0,0 +1,106 @@ +package server + +import ( + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/jzelinskie/cobrautil/v2" + "github.com/mattn/go-isatty" + "github.com/rs/zerolog" + "github.com/rs/zerolog/diode" + "github.com/spf13/cobra" + + "github.com/authzed/spicedb/internal/logging" +) + +const ( + logLevelFlag = "log-level" + logFormatFlag = "log-format" + + // diodeBufferSize is the number of log messages to buffer before dropping. + // This matches the default used by cobrazerolog. + diodeBufferSize = 1000 + + // diodePollInterval is how often the diode flushes buffered messages. + // This matches the default used by cobrazerolog. + diodePollInterval = 10 * time.Millisecond +) + +// noCloseWriter wraps an io.Writer to satisfy io.WriteCloser without +// actually closing the underlying writer. This prevents diode.Writer.Close() +// from closing os.Stderr. +type noCloseWriter struct{ io.Writer } + +// Compile-time check that noCloseWriter implements io.WriteCloser. +var _ io.WriteCloser = noCloseWriter{} + +// Close is a no-op that satisfies io.Closer without closing the underlying writer. +func (noCloseWriter) Close() error { return nil } + +// ConfigureLoggingRunE mirrors cobrazerolog's flag handling for --log-level and +// --log-format, but also stores the diode.Writer closer so the program can flush +// logs on shutdown. +func ConfigureLoggingRunE() cobrautil.CobraRunFunc { + return func(cmd *cobra.Command, args []string) error { + if cobrautil.IsBuiltinCommand(cmd) { + return nil // No-op for builtins + } + + format := cobrautil.MustGetString(cmd, logFormatFlag) + switch format { + case "auto", "console", "json": + // valid formats + default: + return fmt.Errorf("unknown log format: %s", format) + } + + var output io.Writer + // Check stderr for TTY since that's where logs are written + if format == "console" || (format == "auto" && isatty.IsTerminal(os.Stderr.Fd())) { + output = zerolog.ConsoleWriter{Out: os.Stderr} + } else { + output = os.Stderr + } + + // Ensure closing the diode doesn't close stderr. 
+ diodeWriter := diode.NewWriter(noCloseWriter{output}, diodeBufferSize, diodePollInterval, func(missed int) { + fmt.Fprintf(os.Stderr, "Logger Dropped %d messages\n", missed) + }) + + l := zerolog.New(&diodeWriter).With().Timestamp().Logger() + + level := strings.ToLower(cobrautil.MustGetString(cmd, logLevelFlag)) + switch level { + case "trace": + l = l.Level(zerolog.TraceLevel) + case "debug": + l = l.Level(zerolog.DebugLevel) + case "info": + l = l.Level(zerolog.InfoLevel) + case "warn": + l = l.Level(zerolog.WarnLevel) + case "error": + l = l.Level(zerolog.ErrorLevel) + case "fatal": + l = l.Level(zerolog.FatalLevel) + case "panic": + l = l.Level(zerolog.PanicLevel) + default: + return fmt.Errorf("unknown log level: %s", level) + } + + logging.SetGlobalLoggerWithCloser(l, &diodeWriter) + + l.Info(). + Str("format", format). + Str("log_level", level). + Str("provider", "zerolog"). + Bool("async", true). + Msg("configured logging") + + return nil + } +} diff --git a/pkg/cmd/server/logging_run_e_test.go b/pkg/cmd/server/logging_run_e_test.go new file mode 100644 index 000000000..ba2d0cf5e --- /dev/null +++ b/pkg/cmd/server/logging_run_e_test.go @@ -0,0 +1,239 @@ +package server + +import ( + "bytes" + "testing" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/diode" + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" + + "github.com/authzed/spicedb/internal/logging" +) + +func TestConfigureLoggingRunE(t *testing.T) { + tests := []struct { + name string + logLevel string + logFormat string + expectError bool + errorMsg string + }{ + { + name: "valid info level json format", + logLevel: "info", + logFormat: "json", + expectError: false, + }, + { + name: "valid debug level console format", + logLevel: "debug", + logFormat: "console", + expectError: false, + }, + { + name: "valid trace level", + logLevel: "trace", + logFormat: "json", + expectError: false, + }, + { + name: "valid warn level", + logLevel: "warn", + logFormat: "json", + expectError: false, + }, + { + name: "valid error level", + logLevel: "error", + logFormat: "json", + expectError: false, + }, + { + name: "valid fatal level", + logLevel: "fatal", + logFormat: "json", + expectError: false, + }, + { + name: "valid panic level", + logLevel: "panic", + logFormat: "json", + expectError: false, + }, + { + name: "auto format", + logLevel: "info", + logFormat: "auto", + expectError: false, + }, + { + name: "invalid log level", + logLevel: "invalid", + logFormat: "json", + expectError: true, + errorMsg: "unknown log level: invalid", + }, + { + name: "invalid log format", + logLevel: "info", + logFormat: "xml", + expectError: true, + errorMsg: "unknown log format: xml", + }, + { + name: "uppercase level is normalized", + logLevel: "INFO", + logFormat: "json", + expectError: false, + }, + { + name: "mixed case level is normalized", + logLevel: "DeBuG", + logFormat: "json", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Save original logger state and ensure cleanup happens immediately + originalLogger := logging.Logger + defer func() { + // Close any diode writer before restoring the original logger + _ = logging.Close() + logging.SetGlobalLogger(originalLogger) + }() + + cmd := &cobra.Command{ + Use: "test", + Run: func(cmd *cobra.Command, args []string) {}, + } + cmd.Flags().String("log-level", tt.logLevel, "") + cmd.Flags().String("log-format", tt.logFormat, "") + + runE := ConfigureLoggingRunE() + err := runE(cmd, []string{}) + + if tt.expectError { + 
require.Error(t, err) + require.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestConfigureLoggingRunE_SkipsBuiltinCommands(t *testing.T) { + cmd := &cobra.Command{ + Use: "help", + Run: func(cmd *cobra.Command, args []string) {}, + } + // Mark as builtin by setting the help flag + cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {}) + + // Don't set required flags - if it doesn't skip, it will panic + runE := ConfigureLoggingRunE() + + // This should be a no-op for help command and not panic + // Note: IsBuiltinCommand checks specific command names, so we test + // that it at least doesn't crash without flags + _ = runE // silence unused variable warning - we're testing that it doesn't panic when created +} + +func TestNoCloseWriter(t *testing.T) { + t.Run("Write delegates to underlying writer", func(t *testing.T) { + buf := &bytes.Buffer{} + ncw := noCloseWriter{buf} + + n, err := ncw.Write([]byte("test")) + + require.NoError(t, err) + require.Equal(t, 4, n) + require.Equal(t, "test", buf.String()) + }) + + t.Run("Close is a no-op", func(t *testing.T) { + buf := &bytes.Buffer{} + ncw := noCloseWriter{buf} + + // Close should succeed without error + err := ncw.Close() + require.NoError(t, err) + + // Writer should still be usable after Close + n, err := ncw.Write([]byte("after close")) + require.NoError(t, err) + require.Equal(t, 11, n) + require.Equal(t, "after close", buf.String()) + }) +} + +func TestConfigureLoggingRunE_LoggerIsSet(t *testing.T) { + // Save original logger state + originalLogger := logging.Logger + defer func() { + _ = logging.Close() + logging.SetGlobalLogger(originalLogger) + }() + + cmd := &cobra.Command{ + Use: "test", + Run: func(cmd *cobra.Command, args []string) {}, + } + cmd.Flags().String("log-level", "info", "") + cmd.Flags().String("log-format", "json", "") + + runE := ConfigureLoggingRunE() + err := runE(cmd, []string{}) + require.NoError(t, err) + + // Verify logger was set (it should not be the Nop logger) + require.NotEqual(t, zerolog.Nop(), logging.Logger) +} + +func TestLogsAreFlushedOnClose(t *testing.T) { + // Save original logger state + originalLogger := logging.Logger + + // Reset global closer state to ensure test isolation. + // Previous tests may have called Close() which sets closed=true, + // causing SetGlobalLoggerWithCloser to immediately close any new closer. 
+ logging.ResetCloserForTesting() + + defer func() { + _ = logging.Close() // Flush any pending logs before cleanup + logging.SetGlobalLogger(originalLogger) + logging.ResetCloserForTesting() + }() + + // Create a buffer to capture log output + var buf bytes.Buffer + + // Create a diode writer that writes to our buffer + diodeWriter := diode.NewWriter(noCloseWriter{&buf}, diodeBufferSize, diodePollInterval, func(missed int) { + t.Errorf("Logger dropped %d messages", missed) + }) + + // Set up logger with the diode writer + logger := zerolog.New(&diodeWriter).With().Timestamp().Logger() + logging.SetGlobalLoggerWithCloser(logger, &diodeWriter) + + // Write a unique log message + testMessage := "test-flush-message-12345" + logging.Info().Msg(testMessage) + + // Before Close(), the message may still be buffered in the diode + // We don't check here because async behavior is unpredictable + + // Close should flush all buffered messages + err := logging.Close() + require.NoError(t, err) + + // After Close(), the message MUST appear in the buffer + output := buf.String() + require.Contains(t, output, testMessage, + "log message should be flushed to output after Close(); got: %s", output) +} diff --git a/pkg/testutil/leakdetection.go b/pkg/testutil/leakdetection.go index 025a54a6e..4aea5c21f 100644 --- a/pkg/testutil/leakdetection.go +++ b/pkg/testutil/leakdetection.go @@ -15,5 +15,7 @@ func GoLeakIgnores() []goleak.Option { goleak.IgnoreAnyFunction("github.com/maypok86/otter/internal/core.(*Cache[...]).cleanup"), goleak.IgnoreAnyFunction("github.com/maypok86/otter/internal/core.(*Cache[...]).process"), goleak.IgnoreAnyFunction("github.com/maypok86/otter/internal/unixtime.startTimer.func1"), + // Async logging diode writer has a polling goroutine that flushes buffered logs + goleak.IgnoreAnyFunction("github.com/rs/zerolog/diode.Writer.poll"), } }
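
The main.go restructuring above exists because deferred functions are skipped when os.Exit is called: a defer logging.Close() placed directly in a main() that later calls os.Exit would never flush the async log buffer. Moving the body into run() and calling os.Exit(run()) exactly once lets every defer inside run() fire before the process exits. A reduced illustration of the pattern (hypothetical, not taken from the patch):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Deferred calls inside run() execute before control returns here,
	// so they are not skipped by the single os.Exit call.
	os.Exit(run())
}

func run() int {
	defer fmt.Println("flushed") // runs on every return path of run()
	if len(os.Args) > 1 {
		return 1
	}
	return 0
}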
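
To show how the new API is meant to be consumed outside of ConfigureLoggingRunE, here is a minimal sketch of a hypothetical embedder that wires a diode-backed logger to a file and flushes it on shutdown. The helper name configureFileLogging and the log path are illustrative only; the diode.NewWriter call mirrors the one in logging_run_e.go, and the registration and flush calls are the SetGlobalLoggerWithCloser and Close functions added in internal/logging/logger.go:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/diode"

	"github.com/authzed/spicedb/internal/logging"
)

// configureFileLogging is a hypothetical helper, not part of this change; it
// mirrors ConfigureLoggingRunE but sends logs to a file instead of stderr.
func configureFileLogging(path string) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
	if err != nil {
		return err
	}

	// Buffer up to 1000 messages, polled every 10ms, matching logging_run_e.go.
	// No noCloseWriter wrapper here: closing the diode may also close the file,
	// which is the desired behavior for a file sink (unlike os.Stderr).
	w := diode.NewWriter(f, 1000, 10*time.Millisecond, func(missed int) {
		fmt.Fprintf(os.Stderr, "Logger Dropped %d messages\n", missed)
	})

	logger := zerolog.New(&w).With().Timestamp().Logger()

	// Register the closer together with the logger so a single logging.Close()
	// at shutdown flushes the diode buffer before the process exits.
	logging.SetGlobalLoggerWithCloser(logger, &w)
	return nil
}

func main() {
	if err := configureFileLogging("/tmp/example.log"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer func() {
		if err := logging.Close(); err != nil {
			fmt.Fprintf(os.Stderr, "warning: failed to flush logs: %v\n", err)
		}
	}()

	logging.Info().Msg("hello from the embedder")
}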
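
Finally, the goleak ignore added in pkg/testutil/leakdetection.go is consumed by test packages that register leak detection in their TestMain. An illustrative sketch, assuming the usual go.uber.org/goleak wiring (the package name and location are hypothetical):

package server_test

import (
	"testing"

	"go.uber.org/goleak"

	"github.com/authzed/spicedb/pkg/testutil"
)

// TestMain fails the package's tests if goroutines leak. The new
// diode.Writer.poll entry in GoLeakIgnores keeps the async logging
// goroutine, which intentionally outlives individual tests, from
// being reported as a leak.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m, testutil.GoLeakIgnores()...)
}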