From 58612d6f5021b2084024e9d8cd0e6eeb51d30af0 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Wed, 21 Aug 2024 07:59:21 -0400 Subject: [PATCH] CBG-3933 allow console logs to be rotated (#7058) * CBG-3933 allow console logs to be rotated * update log deletion tests - change log deletion to use regex to match all patterns of possible log paths - disable compression for console logs - restore stderr log collation * windows requires no slashes in path names * Apply suggestions from code review Co-authored-by: Ben Brooks * Update base/logger_console_test.go Co-authored-by: Ben Brooks * add missing import --------- Co-authored-by: Ben Brooks --- base/logger.go | 5 +- base/logger_audit_test.go | 2 +- base/logger_console.go | 42 ++--- base/logger_console_test.go | 111 ++++++++++++++ base/logger_file.go | 113 +++++++++----- base/logger_file_test.go | 256 +++++++++++++++++-------------- base/logging_config.go | 1 + base/logging_test.go | 52 ++++--- docs/api/components/schemas.yaml | 256 ++++++++++++++++++++----------- 9 files changed, 559 insertions(+), 279 deletions(-) diff --git a/base/logger.go b/base/logger.go index 6d5734dbec..cfc1c50654 100644 --- a/base/logger.go +++ b/base/logger.go @@ -61,8 +61,7 @@ func FlushLogBuffers() { // logCollationWorker will take log lines over the given channel, and buffer them until either the buffer is full, or the flushTimeout is exceeded. // This is to reduce the number of writes to the log files, in order to batch them up as larger collated chunks, whilst maintaining a low-level of latency with the flush timeout. -func logCollationWorker(collateBuffer chan string, flushChan chan struct{}, collateBufferWg *sync.WaitGroup, logger *log.Logger, maxBufferSize int, collateFlushTimeout time.Duration) { - +func logCollationWorker(loggerClosed chan struct{}, collateBuffer chan string, flushChan chan struct{}, collateBufferWg *sync.WaitGroup, logger *log.Logger, maxBufferSize int, collateFlushTimeout time.Duration) { // The initial duration of the timeout timer doesn't matter, // because we reset it whenever we buffer a log without flushing it. t := time.NewTimer(math.MaxInt64) @@ -97,6 +96,8 @@ func logCollationWorker(collateBuffer chan string, flushChan chan struct{}, coll logger.Print(strings.Join(logBuffer, "\n")) logBuffer = logBuffer[:0] } + case <-loggerClosed: + return } } } diff --git a/base/logger_audit_test.go b/base/logger_audit_test.go index d1ec98a9f3..68e523d54c 100644 --- a/base/logger_audit_test.go +++ b/base/logger_audit_test.go @@ -132,13 +132,13 @@ func TestAuditLoggerGlobalFields(t *testing.T) { } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { + ResetGlobalTestLogging(t) ctx := TestCtx(t) if testCase.contextFields != nil { ctx = AuditLogCtx(ctx, testCase.contextFields) } logger, err := NewAuditLogger(ctx, &AuditLoggerConfig{FileLoggerConfig: FileLoggerConfig{Enabled: BoolPtr(true)}}, tmpdir, 0, nil, testCase.globalFields) require.NoError(t, err) - defer assert.NoError(t, logger.Close()) auditLogger.Store(logger) startWarnCount := SyncGatewayStats.GlobalStats.ResourceUtilizationStats().WarnCount.Value() diff --git a/base/logger_console.go b/base/logger_console.go index ceccd9fd8b..7b4e915f96 100644 --- a/base/logger_console.go +++ b/base/logger_console.go @@ -19,8 +19,6 @@ import ( "path/filepath" "strconv" "sync" - - "gopkg.in/natefinch/lumberjack.v2" ) // ConsoleLogger is a file logger with a default output of stderr, and tunable log level/keys. 
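The logCollationWorker change above threads a loggerClosed channel into the worker so the goroutine exits when its logger is closed, rather than lingering after Close(). A minimal, self-contained sketch of that shutdown pattern follows; the names, buffer handling, and stdout writer are illustrative stand-ins, not the Sync Gateway implementation.

// Illustrative sketch (not part of the patch) of the collation-worker shutdown pattern.
package main

import (
	"fmt"
	"strings"
	"time"
)

// collationWorker buffers incoming lines and writes them as a single collated
// chunk when the buffer fills or the flush timeout fires. Closing the closed
// channel makes the goroutine return, mirroring the new
// "case <-loggerClosed: return" branch added above.
func collationWorker(closed <-chan struct{}, lines <-chan string, maxBuffer int, flushTimeout time.Duration) {
	buffer := make([]string, 0, maxBuffer)
	timer := time.NewTimer(flushTimeout)
	defer timer.Stop()
	flush := func() {
		if len(buffer) > 0 {
			fmt.Println(strings.Join(buffer, "\n")) // stand-in for logger.Print
			buffer = buffer[:0]
		}
	}
	for {
		select {
		case line := <-lines:
			buffer = append(buffer, line)
			if len(buffer) >= maxBuffer {
				flush()
			} else {
				timer.Reset(flushTimeout)
			}
		case <-timer.C:
			flush()
		case <-closed:
			// Logger has been closed: stop the worker goroutine.
			return
		}
	}
}

func main() {
	lines := make(chan string)
	closed := make(chan struct{})
	go collationWorker(closed, lines, 10, 100*time.Millisecond)

	lines <- "first log line"
	time.Sleep(200 * time.Millisecond) // let the timeout-driven flush run
	close(closed)                      // worker goroutine returns cleanly
}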
@@ -55,8 +53,12 @@ func NewConsoleLogger(ctx context.Context, shouldLogLocation bool, config *Conso config = &ConsoleLoggerConfig{} } + cancelCtx, cancelFunc := context.WithCancel(ctx) + // validate and set defaults - if err := config.init(); err != nil { + rotationDoneChan, err := config.init(cancelCtx) + if err != nil { + defer cancelFunc() return nil, err } @@ -67,9 +69,13 @@ func NewConsoleLogger(ctx context.Context, shouldLogLocation bool, config *Conso LogKeyMask: &logKey, ColorEnabled: *config.ColorEnabled, FileLogger: FileLogger{ - Enabled: AtomicBool{}, - logger: log.New(config.Output, "", 0), - config: config.FileLoggerConfig, + Enabled: AtomicBool{}, + logger: log.New(config.Output, "", 0), + config: config.FileLoggerConfig, + rotationDoneChan: rotationDoneChan, + cancelFunc: cancelFunc, + name: "console", + closed: make(chan struct{}), }, isStderr: config.FileOutput == "" && *config.Enabled, config: *config, @@ -83,7 +89,7 @@ func NewConsoleLogger(ctx context.Context, shouldLogLocation bool, config *Conso logger.collateBufferWg = &sync.WaitGroup{} // Start up a single worker to consume messages from the buffer - go logCollationWorker(logger.collateBuffer, logger.flushChan, logger.collateBufferWg, logger.logger, *config.CollationBufferSize, consoleLoggerCollateFlushTimeout) + go logCollationWorker(logger.closed, logger.collateBuffer, logger.flushChan, logger.collateBufferWg, logger.logger, *config.CollationBufferSize, consoleLoggerCollateFlushTimeout) } // We can only log the console log location itself when logging has previously been set up and is being re-initialized from a config. @@ -179,29 +185,25 @@ func (l *ConsoleLogger) getConsoleLoggerConfig() *ConsoleLoggerConfig { } // init validates and sets any defaults for the given ConsoleLoggerConfig -func (lcc *ConsoleLoggerConfig) init() error { +func (lcc *ConsoleLoggerConfig) init(ctx context.Context) (chan struct{}, error) { if lcc == nil { - return errors.New("nil LogConsoleConfig") + return nil, errors.New("nil LogConsoleConfig") } - if err := lcc.initRotationConfig("console", 0, 0); err != nil { - return err + if err := lcc.initRotationConfig("console", 0, 0, false); err != nil { + return nil, err } + var rotationDoneChan chan struct{} // Default to os.Stderr if alternative output is not set if lcc.Output == nil && lcc.FileOutput == "" { lcc.Output = os.Stderr } else if lcc.FileOutput != "" { // Otherwise check permissions on the given output and create a Lumberjack logger if err := validateLogFileOutput(lcc.FileOutput); err != nil { - return err - } - lcc.Output = &lumberjack.Logger{ - Filename: filepath.FromSlash(lcc.FileOutput), - MaxSize: *lcc.Rotation.MaxSize, - MaxAge: *lcc.Rotation.MaxAge, - Compress: BoolDefault(lcc.Rotation.Compress, false), + return nil, err } + rotationDoneChan = lcc.initLumberjack(ctx, "console", filepath.FromSlash(lcc.FileOutput)) } // Default to disabled only when a log key or log level has not been specified @@ -225,7 +227,7 @@ func (lcc *ConsoleLoggerConfig) init() error { newLevel := LevelInfo lcc.LogLevel = &newLevel } else if *lcc.LogLevel < LevelNone || *lcc.LogLevel > levelCount { - return fmt.Errorf("invalid log level: %v", *lcc.LogLevel) + return nil, fmt.Errorf("invalid log level: %v", *lcc.LogLevel) } // If ColorEnabled is not explicitly set, use the value of $SG_COLOR @@ -249,7 +251,7 @@ func (lcc *ConsoleLoggerConfig) init() error { lcc.CollationBufferSize = &bufferSize } - return nil + return rotationDoneChan, nil } func mustInitConsoleLogger(ctx context.Context, config 
*ConsoleLoggerConfig) *ConsoleLogger { diff --git a/base/logger_console_test.go b/base/logger_console_test.go index a3aad6d257..b3881d96d2 100644 --- a/base/logger_console_test.go +++ b/base/logger_console_test.go @@ -13,9 +13,14 @@ package base import ( "fmt" "io" + "path/filepath" + "slices" + "sync" "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var consoleShouldLogTests = []struct { @@ -287,3 +292,109 @@ func TestConsoleLogDefaults(t *testing.T) { }) } } + +func TestConsoleIrregularLogPaths(t *testing.T) { + // override min rotation interval for testing + originalMinRotationInterval := minLogRotationInterval + minLogRotationInterval = time.Millisecond * 10 + defer func() { minLogRotationInterval = originalMinRotationInterval }() + + testCases := []struct { + name string + logPath string + }{ + { + name: ".log extension", + logPath: "foo.log", + // take foo-2021-01-01T00-00-00.000.log + }, + { + name: "no extension", + logPath: "foo", + // foo-2021-01-01T00-00-00.000 + }, + { + name: "multiple dots", + logPath: "two.ext.log", + // two.ext-2021-01-01T00-00-00.000.log + }, + { + name: "start with .", + logPath: ".hidden.log", + // .hidden-2021-01-01T00-00-00.000.log + }, + { + name: "start with ., no ext", + logPath: ".hidden", + // -2021-01-01T00-00-00.000.hidden + }, + } + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + tempdir := lumberjackTempDir(t) + config := &ConsoleLoggerConfig{ + LogLevel: logLevelPtr(LevelDebug), + LogKeys: []string{"HTTP"}, + FileOutput: filepath.Join(tempdir, test.logPath), + FileLoggerConfig: FileLoggerConfig{ + Enabled: BoolPtr(true), + CollationBufferSize: IntPtr(0), + Rotation: logRotationConfig{ + RotationInterval: NewConfigDuration(10 * time.Millisecond), + }, + }} + + ctx := TestCtx(t) + logger, err := NewConsoleLogger(ctx, false, config) + require.NoError(t, err) + + // ensure logging is done before closing the logger + wg := sync.WaitGroup{} + wg.Add(1) + doneChan := make(chan struct{}) + defer func() { + close(doneChan) + wg.Wait() + assert.NoError(t, logger.Close()) + }() + go func() { + for { + select { + case <-doneChan: + wg.Done() + return + default: + logger.logf("some text") + } + } + }() + require.EventuallyWithT(t, func(c *assert.CollectT) { + filenames := getDirFiles(t, tempdir) + assert.Contains(c, filenames, test.logPath) + assert.Greater(c, len(filenames), 2) + }, time.Second, 10*time.Millisecond) + + // add a few non-matching files to the directory for negative testing + nonMatchingFileNames := []string{ + "console.log", + "consoellog", + "console.log.txt", + ".console", + "consolelog-2021-01-01T00-00-00.000", + "console-2021-01-01T00-00-00.000.log", + } + for _, name := range nonMatchingFileNames { + require.NoError(t, makeTestFile(1, name, tempdir)) + } + + _, pattern := getDeletionDirAndRegexp(filepath.Join(tempdir, test.logPath)) + for _, filename := range getDirFiles(t, tempdir) { + if slices.Contains(nonMatchingFileNames, filename) { + require.NotRegexp(t, pattern, filename) + } else { + require.Regexp(t, pattern, filename) + } + } + }) + } +} diff --git a/base/logger_file.go b/base/logger_file.go index ae25c3fed2..b0b23b0d19 100644 --- a/base/logger_file.go +++ b/base/logger_file.go @@ -17,6 +17,7 @@ import ( "log" "os" "path/filepath" + "regexp" "runtime/debug" "strings" "sync" @@ -43,6 +44,11 @@ var ( belowMinValueFmt = "%s for %v was set to %v which is below the minimum of %v" aboveMaxValueFmt = "%s for %v was set to %v which is above the maximum of 
%v" + + // lumberjack backupTimeFormat = "2006-01-02T15-04-05.000" + lumberjackRotationMidfix = `-\d{4}-\d{2}-\d{2}T\d{2}-\d{2}-\d{2}.\d{3}` + // lumberjack compressSuffix = ".gz" + optionalCompressSuffix = `(\.gz)?` ) type FileLogger struct { @@ -51,6 +57,7 @@ type FileLogger struct { // collateBuffer is used to store log entries to batch up multiple logs. collateBuffer chan string collateBufferWg *sync.WaitGroup + closed chan struct{} flushChan chan struct{} level LogLevel name string @@ -78,7 +85,7 @@ type logRotationConfig struct { LocalTime *bool `json:"localtime,omitempty"` // If true, it uses the computer's local time to format the backup timestamp. RotatedLogsSizeLimit *int `json:"rotated_logs_size_limit,omitempty"` // Max Size of log files before deletion RotationInterval *ConfigDuration `json:"rotation_interval,omitempty"` // Interval at which logs are rotated - Compress *bool `json:"-"` // Enable log compression - not exposed in config + compress *bool `json:"-"` // Compress rotated logs, not exposed to users } // NewFileLogger returns a new FileLogger from a config. @@ -87,11 +94,11 @@ func NewFileLogger(ctx context.Context, config *FileLoggerConfig, level LogLevel config = &FileLoggerConfig{} } - rotationDoneChan := make(chan struct{}) cancelCtx, cancelFunc := context.WithCancel(ctx) // validate and set defaults - if err := config.init(cancelCtx, level, name, logFilePath, minAge, rotationDoneChan); err != nil { + rotationDoneChan, err := config.init(cancelCtx, level, name, logFilePath, minAge) + if err != nil { cancelFunc() return nil, err } @@ -103,6 +110,7 @@ func NewFileLogger(ctx context.Context, config *FileLoggerConfig, level LogLevel output: config.Output, logger: log.New(config.Output, "", 0), config: *config, + closed: make(chan struct{}), cancelFunc: cancelFunc, rotationDoneChan: rotationDoneChan, } @@ -119,7 +127,7 @@ func NewFileLogger(ctx context.Context, config *FileLoggerConfig, level LogLevel logger.collateBufferWg = &sync.WaitGroup{} // Start up a single worker to consume messages from the buffer - go logCollationWorker(logger.collateBuffer, logger.flushChan, logger.collateBufferWg, logger.logger, *config.CollationBufferSize, fileLoggerCollateFlushTimeout) + go logCollationWorker(logger.closed, logger.collateBuffer, logger.flushChan, logger.collateBufferWg, logger.logger, *config.CollationBufferSize, fileLoggerCollateFlushTimeout) } return logger, nil @@ -152,13 +160,18 @@ func (l *FileLogger) Rotate() error { // Close cancels the log rotation rotation and the underlying file descriptor for the active log file. 
func (l *FileLogger) Close() error { + // cancelFunc will stop the log rotation/deletion goroutine + // once all log rotation is done and log output is closed, shut down the logCollationWorker + defer close(l.closed) // cancel the log rotation goroutine and wait for it to stop if l.cancelFunc != nil { l.cancelFunc() } + // wait for the rotation goroutine to stop if l.rotationDoneChan != nil { <-l.rotationDoneChan } + if c, ok := l.output.(io.Closer); ok { return c.Close() } @@ -166,7 +179,7 @@ func (l *FileLogger) Close() error { } func (l *FileLogger) String() string { - return "FileLogger(" + l.level.String() + ")" + return "FileLogger(" + l.name + ")" } // logf will put the given message into the collation buffer if it exists, @@ -219,9 +232,9 @@ func (l *FileLogger) getFileLoggerConfig() *FileLoggerConfig { return &fileLoggerConfig } -func (lfc *FileLoggerConfig) init(ctx context.Context, level LogLevel, name string, logFilePath string, minAge int, rotationDoneChan chan struct{}) error { +func (lfc *FileLoggerConfig) init(ctx context.Context, level LogLevel, name string, logFilePath string, minAge int) (chan struct{}, error) { if lfc == nil { - return errors.New("nil LogFileConfig") + return nil, errors.New("nil LogFileConfig") } if lfc.Enabled == nil { @@ -229,19 +242,13 @@ func (lfc *FileLoggerConfig) init(ctx context.Context, level LogLevel, name stri lfc.Enabled = BoolPtr(level < LevelDebug) } - if err := lfc.initRotationConfig(name, defaultMaxSize, minAge); err != nil { - return err + if err := lfc.initRotationConfig(name, defaultMaxSize, minAge, true); err != nil { + return nil, err } - var rotateableLogger *lumberjack.Logger + var rotationDoneChan chan struct{} if lfc.Output == nil { - rotateableLogger = newLumberjackOutput( - filepath.Join(filepath.FromSlash(logFilePath), logFilePrefix+name+".log"), - *lfc.Rotation.MaxSize, - *lfc.Rotation.MaxAge, - BoolDefault(lfc.Rotation.Compress, true), - ) - lfc.Output = rotateableLogger + rotationDoneChan = lfc.initLumberjack(ctx, name, filepath.Join(filepath.FromSlash(logFilePath), logFilePrefix+name+".log")) } if lfc.CollationBufferSize == nil { @@ -253,9 +260,24 @@ func (lfc *FileLoggerConfig) init(ctx context.Context, level LogLevel, name stri lfc.CollationBufferSize = &bufferSize } + return rotationDoneChan, nil +} + +// initLumberjack will create a new Lumberjack logger from the given config settings. Returns a doneChan which fires when the log rotation is stopped.
+func (lfc *FileLoggerConfig) initLumberjack(ctx context.Context, name string, lumberjackFilename string) chan struct{} { + rotationDoneChan := make(chan struct{}) + dir, logPattern := getDeletionDirAndRegexp(lumberjackFilename) + rotateableLogger := &lumberjack.Logger{ + Filename: lumberjackFilename, + MaxSize: *lfc.Rotation.MaxSize, + MaxAge: *lfc.Rotation.MaxAge, + Compress: *lfc.Rotation.compress, + } + lfc.Output = rotateableLogger + var rotationTicker *time.Ticker var rotationTickerCh <-chan time.Time - if i := lfc.Rotation.RotationInterval.Value(); i > 0 && rotateableLogger != nil { + if i := lfc.Rotation.RotationInterval.Value(); i > 0 { rotationTicker = time.NewTicker(i) rotationTickerCh = rotationTicker.C } @@ -277,7 +299,7 @@ func (lfc *FileLoggerConfig) init(ctx context.Context, level LogLevel, name stri close(rotationDoneChan) return case <-logDeletionTicker.C: - err := runLogDeletion(ctx, logFilePath, level.String(), int(float64(*lfc.Rotation.RotatedLogsSizeLimit)*rotatedLogsLowWatermarkMultiplier), *lfc.Rotation.RotatedLogsSizeLimit) + err := runLogDeletion(ctx, dir, logPattern, int(float64(*lfc.Rotation.RotatedLogsSizeLimit)*rotatedLogsLowWatermarkMultiplier), *lfc.Rotation.RotatedLogsSizeLimit) if err != nil { WarnfCtx(ctx, "%s", err) } @@ -290,11 +312,11 @@ func (lfc *FileLoggerConfig) init(ctx context.Context, level LogLevel, name stri } } }() - - return nil + return rotationDoneChan } -func (lfc *FileLoggerConfig) initRotationConfig(name string, defaultMaxSize, minAge int) error { +// initRotationConfig will validate the log rotation settings and set defaults where necessary. +func (lfc *FileLoggerConfig) initRotationConfig(name string, defaultMaxSize, minAge int, compress bool) error { if lfc.Rotation.MaxSize == nil { lfc.Rotation.MaxSize = &defaultMaxSize } else if *lfc.Rotation.MaxSize == 0 { @@ -328,23 +350,16 @@ func (lfc *FileLoggerConfig) initRotationConfig(name string, defaultMaxSize, min } } - return nil -} - -func newLumberjackOutput(filename string, maxSize, maxAge int, compress bool) *lumberjack.Logger { - return &lumberjack.Logger{ - Filename: filename, - MaxSize: maxSize, - MaxAge: maxAge, - Compress: compress, + if lfc.Rotation.compress == nil { + lfc.Rotation.compress = &compress } + return nil } // runLogDeletion will delete rotated logs for the supplied logLevel. It will only perform these deletions when the // cumulative size of the logs are above the supplied sizeLimitMB. // logDirectory is the supplied directory where the logs are stored. 
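The rotation/deletion goroutine started by initLumberjack above pairs a cancellable context with a done channel: the goroutine owns rotationDoneChan and closes it only after observing the cancelled context, so FileLogger.Close can wait for rotation work to stop before closing the underlying output. A stripped-down sketch of that handshake, with hypothetical names:

// Illustrative sketch (not part of the patch) of the cancel-context/done-channel
// handshake between FileLogger.Close and the rotation goroutine.
package main

import (
	"context"
	"fmt"
	"time"
)

// startRotationWorker runs rotate on every tick until ctx is cancelled. The
// returned channel is closed only after the goroutine has fully stopped,
// which lets the caller wait for it during Close.
func startRotationWorker(ctx context.Context, interval time.Duration, rotate func()) <-chan struct{} {
	done := make(chan struct{})
	ticker := time.NewTicker(interval)
	go func() {
		defer close(done) // signal: no further rotations will run
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				rotate()
			}
		}
	}()
	return done
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := startRotationWorker(ctx, 50*time.Millisecond, func() {
		fmt.Println("rotate") // lumberjack's Rotate() would be called here
	})

	time.Sleep(120 * time.Millisecond)

	// Close-side sequence, as in FileLogger.Close:
	cancel() // stop the rotation worker...
	<-done   // ...wait until it has actually exited...
	// ...and only then close the underlying log output.
}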
-func runLogDeletion(ctx context.Context, logDirectory string, logLevel string, sizeLimitMBLowWatermark int, sizeLimitMBHighWatermark int) (err error) { - +func runLogDeletion(ctx context.Context, logDirectory string, logPattern *regexp.Regexp, sizeLimitMBLowWatermark int, sizeLimitMBHighWatermark int) (err error) { sizeLimitMBLowWatermark = sizeLimitMBLowWatermark * 1024 * 1024 // Convert MB input to bytes sizeLimitMBHighWatermark = sizeLimitMBHighWatermark * 1024 * 1024 // Convert MB input to bytes @@ -361,7 +376,7 @@ func runLogDeletion(ctx context.Context, logDirectory string, logLevel string, s willDelete := false for i := len(files) - 1; i >= 0; i-- { file := files[i] - if strings.HasPrefix(file.Name(), logFilePrefix+logLevel) && strings.HasSuffix(file.Name(), ".log.gz") { + if logPattern.Match([]byte(file.Name())) { fi, err := file.Info() if err != nil { InfofCtx(ctx, KeyAll, "Couldn't get size of log file %q: %v - ignoring for cleanup calculation", file.Name(), err) @@ -382,7 +397,7 @@ func runLogDeletion(ctx context.Context, logDirectory string, logLevel string, s if willDelete { for j := indexDeletePoint; j >= 0; j-- { file := files[j] - if strings.HasPrefix(file.Name(), logFilePrefix+logLevel) && strings.HasSuffix(file.Name(), ".log.gz") { + if logPattern.Match([]byte(file.Name())) { err = os.Remove(filepath.Join(logDirectory, file.Name())) if err != nil { return errors.New(fmt.Sprintf("Error deleting stale log file: %v", err)) @@ -393,3 +408,31 @@ func runLogDeletion(ctx context.Context, logDirectory string, logLevel string, s return nil } + +// getDeletionDirAndRegexp will return the directory and a regexp matching log file and rotated patterns. +func getDeletionDirAndRegexp(path string) (string, *regexp.Regexp) { + dir, filename := filepath.Split(path) + + // foo + // foo-2019-01-01T00-00-00.000 + // foo-2019-01-01T00-00-00.000.gz + filenamePattern := regexp.QuoteMeta(filename) + rotatedPattern := filenamePattern + lumberjackRotationMidfix + optionalCompressSuffix + + if lastDot := strings.LastIndex(filename, "."); lastDot != -1 { + // foo.log + // foo-2019-01-01T00-00-00.000.log + // foo-2019-01-01T00-00-00.000.log.gz + // + // or + // + // foo.bar.log + // foo.bar-2019-01-01T00-00-00.000.log + // foo.bar-2019-01-01T00-00-00.000.log.gz + prefix := filename[:lastDot] + suffix := filename[lastDot:] + rotatedPattern = regexp.QuoteMeta(prefix) + lumberjackRotationMidfix + regexp.QuoteMeta(suffix) + optionalCompressSuffix + } + + return dir, regexp.MustCompile(`^(` + filenamePattern + `|` + rotatedPattern + `)$`) +} diff --git a/base/logger_file_test.go b/base/logger_file_test.go index 168dcb3835..223d896e37 100644 --- a/base/logger_file_test.go +++ b/base/logger_file_test.go @@ -109,126 +109,147 @@ func BenchmarkFileShouldLog(b *testing.B) { } func TestRotatedLogDeletion(t *testing.T) { - var dirContents []os.DirEntry - - // Regular Test With multiple files above high and low watermark - dir := t.TempDir() - - err := makeTestFile(2, logFilePrefix+"error-2019-02-01T12-00-00.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(2, logFilePrefix+"error-2019-02-01T12-10-00.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(2, logFilePrefix+"error-2019-02-01T12-20-00.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(2, logFilePrefix+"info-2019-02-01T12-00-00.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(2, logFilePrefix+"info-2019-02-01T12-01-00.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(2, 
logFilePrefix+"info-2019-02-02T12-00-00.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(2, logFilePrefix+"info-2019-02-02T12-10-00.log.gz", dir) - assert.NoError(t, err) - ctx := TestCtx(t) - err = runLogDeletion(ctx, dir, "error", 3, 5) - assert.NoError(t, err) - err = runLogDeletion(ctx, dir, "info", 5, 7) - assert.NoError(t, err) - dirContents, err = os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, dirContents, 3) - - var fileNames = []string{} - - for fileIndex := range dirContents { - fileNames = append(fileNames, dirContents[fileIndex].Name()) + type logFile struct { + name string + size int } - - assert.Contains(t, fileNames, logFilePrefix+"error-2019-02-01T12-20-00.log.gz") - assert.Contains(t, fileNames, logFilePrefix+"info-2019-02-02T12-00-00.log.gz") - assert.Contains(t, fileNames, logFilePrefix+"info-2019-02-02T12-10-00.log.gz") - - assert.NoError(t, os.RemoveAll(dir)) - - // Hit low watermark but not high watermark - dir = t.TempDir() - err = makeTestFile(3, logFilePrefix+"error.log.gz", dir) - assert.NoError(t, err) - err = runLogDeletion(ctx, dir, "error", 2, 4) - assert.NoError(t, err) - dirContents, err = os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, dirContents, 1) - assert.NoError(t, os.RemoveAll(dir)) - - // Single file hitting low and high watermark - dir = t.TempDir() - err = makeTestFile(5, logFilePrefix+"error.log.gz", dir) - assert.NoError(t, err) - err = runLogDeletion(ctx, dir, "error", 2, 4) - assert.NoError(t, err) - dirContents, err = os.ReadDir(dir) - require.NoError(t, err) - assert.Empty(t, dirContents) - assert.NoError(t, os.RemoveAll(dir)) - - // Not hitting low or high therefore no deletion - dir = t.TempDir() - err = makeTestFile(1, logFilePrefix+"error.log.gz", dir) - assert.NoError(t, err) - err = runLogDeletion(ctx, dir, "error", 2, 4) - assert.NoError(t, err) - dirContents, err = os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, dirContents, 1) - assert.NoError(t, os.RemoveAll(dir)) - - // Test deletion with files at the end of date boundaries - dir = t.TempDir() - err = makeTestFile(1, logFilePrefix+"error-2018-12-31T23-59-59.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(1, logFilePrefix+"error-2019-01-01T00-00-00.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(1, logFilePrefix+"error-2019-01-31T23-59-59.log.gz", dir) - assert.NoError(t, err) - err = makeTestFile(1, logFilePrefix+"error-2019-01-01T12-00-00.log.gz", dir) - assert.NoError(t, err) - err = runLogDeletion(ctx, dir, "error", 2, 3) - assert.NoError(t, err) - - dirContents, err = os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, dirContents, 2) - - fileNames = []string{} - for fileIndex := range dirContents { - fileNames = append(fileNames, dirContents[fileIndex].Name()) + testCases := []struct { + name string + preFiles []logFile + postFileNames []string + logDeletionPrefix string + lowWatermark int + highWatermark int + }{ + { + name: "error-deletion", + preFiles: []logFile{ + logFile{size: 2, name: "error-2019-02-01T12-00-00.000.log.gz"}, + logFile{size: 2, name: "error-2019-02-01T12-10-00.000.log.gz"}, + logFile{size: 2, name: "error-2019-02-01T12-20-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-01T12-00-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-01T12-01-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-02T12-00-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-02T12-10-00.000.log.gz"}, + }, + logDeletionPrefix: "error.log", + lowWatermark: 3, + highWatermark: 5, + 
postFileNames: []string{ + "error-2019-02-01T12-20-00.000.log.gz", + "info-2019-02-01T12-00-00.000.log.gz", + "info-2019-02-01T12-01-00.000.log.gz", + "info-2019-02-02T12-00-00.000.log.gz", + "info-2019-02-02T12-10-00.000.log.gz", + }, + }, + { + name: "info-deletion", + preFiles: []logFile{ + logFile{size: 2, name: "error-2019-02-01T12-00-00.000.log.gz"}, + logFile{size: 2, name: "error-2019-02-01T12-10-00.000.log.gz"}, + logFile{size: 2, name: "error-2019-02-01T12-20-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-01T12-00-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-01T12-01-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-02T12-00-00.000.log.gz"}, + logFile{size: 2, name: "info-2019-02-02T12-10-00.000.log.gz"}, + }, + logDeletionPrefix: "info.log", + lowWatermark: 5, + highWatermark: 7, + postFileNames: []string{ + "error-2019-02-01T12-00-00.000.log.gz", + "error-2019-02-01T12-10-00.000.log.gz", + "error-2019-02-01T12-20-00.000.log.gz", + "info-2019-02-02T12-00-00.000.log.gz", + "info-2019-02-02T12-10-00.000.log.gz", + }, + }, + { + name: "low but not high watermark", + preFiles: []logFile{ + logFile{size: 3, name: "error"}, + }, + logDeletionPrefix: "error", + lowWatermark: 2, + highWatermark: 4, + postFileNames: []string{ + "error", + }, + }, + { + name: "high and low watermark", + preFiles: []logFile{ + logFile{size: 5, name: "error"}, + }, + logDeletionPrefix: "error", + lowWatermark: 2, + highWatermark: 4, + postFileNames: []string{}, + }, + { + name: "date boundary deletion", + preFiles: []logFile{ + logFile{size: 1, name: "error-2018-12-31T23-59-59.000.log.gz"}, + logFile{size: 1, name: "error-2019-01-01T00-00-00.000.log.gz"}, + logFile{size: 1, name: "error-2019-01-31T23-59-59.000.log.gz"}, + logFile{size: 1, name: "error-2019-01-01T12-00-00.000.log.gz"}, + }, + logDeletionPrefix: "error.log", + lowWatermark: 2, + highWatermark: 3, + postFileNames: []string{ + "error-2019-01-01T12-00-00.000.log.gz", + "error-2019-01-31T23-59-59.000.log.gz", + }, + }, + { + name: "base .log and .gz logs", + preFiles: []logFile{ + logFile{size: 1, name: "error.log"}, + logFile{size: 4, name: "error-2019-01-01T00-00-00.000.log.gz"}, + }, + logDeletionPrefix: "error.log", + lowWatermark: 2, + highWatermark: 3, + postFileNames: []string{ + "error.log", + }, + }, + { + name: "base .log, gunzipped file and .gz logs", + preFiles: []logFile{ + logFile{size: 1, name: "error.log"}, + logFile{size: 4, name: "error-2019-01-01T00-00-00.000.log"}, + logFile{size: 4, name: "error-2019-01-02T00-00-00.000.log.gz"}, + }, + logDeletionPrefix: "error.log", + lowWatermark: 2, + highWatermark: 3, + postFileNames: []string{ + "error.log", + }, + }, } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { - assert.Contains(t, fileNames, logFilePrefix+"error-2019-01-01T12-00-00.log.gz") - assert.Contains(t, fileNames, logFilePrefix+"error-2019-01-31T23-59-59.log.gz") + dir := t.TempDir() - assert.NoError(t, os.RemoveAll(dir)) + for _, f := range tc.preFiles { + require.NoError(t, makeTestFile(f.size, f.name, dir)) + } - // Test deletion with no .gz files to ensure nothing is deleted - dir = t.TempDir() - err = makeTestFile(1, logFilePrefix+"error", dir) - assert.NoError(t, err) - err = makeTestFile(1, logFilePrefix+"info", dir) - assert.NoError(t, err) + ctx := TestCtx(t) + logFilePath := filepath.Join(dir, tc.logDeletionPrefix) + foundDir, logPattern := getDeletionDirAndRegexp(logFilePath) + require.Equal(t, filepath.Clean(dir), filepath.Clean(foundDir)) - dirContents, err = 
os.ReadDir(dir) - require.NoError(t, err) - require.Len(t, dirContents, 2) + require.NoError(t, runLogDeletion(ctx, dir, logPattern, tc.lowWatermark, tc.highWatermark)) + require.ElementsMatch(t, tc.postFileNames, getDirFiles(t, dir)) - fileNames = []string{} - for fileIndex := range dirContents { - fileNames = append(fileNames, dirContents[fileIndex].Name()) + }) } - - assert.Contains(t, fileNames, logFilePrefix+"error") - assert.Contains(t, fileNames, logFilePrefix+"info") - - assert.NoError(t, os.RemoveAll(dir)) } func makeTestFile(sizeMB int, name string, dir string) (err error) { @@ -244,3 +265,14 @@ func makeTestFile(sizeMB int, name string, dir string) (err error) { } return nil } + +func getDirFiles(t *testing.T, dir string) []string { + dirContents, err := os.ReadDir(dir) + require.NoError(t, err) + + var fileNames []string + for _, file := range dirContents { + fileNames = append(fileNames, file.Name()) + } + return fileNames +} diff --git a/base/logging_config.go b/base/logging_config.go index 5b43ce5534..f5ca0eb920 100644 --- a/base/logging_config.go +++ b/base/logging_config.go @@ -155,6 +155,7 @@ func NewMemoryLogger(level LogLevel) *FileLogger { Enabled: AtomicBool{1}, level: level, name: level.String(), + closed: make(chan struct{}), } logger.output = &logger.buffer logger.logger = log.New(&logger.buffer, "", 0) diff --git a/base/logging_test.go b/base/logging_test.go index 752ea0539a..8779abe425 100644 --- a/base/logging_test.go +++ b/base/logging_test.go @@ -13,6 +13,7 @@ import ( "fmt" "os" "path/filepath" + "regexp" "runtime" "testing" "time" @@ -72,34 +73,23 @@ func TestLogRotationInterval(t *testing.T) { CollationBufferSize: IntPtr(0), Rotation: logRotationConfig{ RotationInterval: NewConfigDuration(rotationInterval), - Compress: BoolPtr(false), + compress: BoolPtr(false), }, } - // On Windows, cleanup of t.TempDir() fails due to open log file handle from Lumberjack. Cannot be fixed from SG. - // https://github.com/natefinch/lumberjack/issues/185 - var logPath string - if runtime.GOOS == "windows" { - var err error - logPath, err = os.MkdirTemp("", t.Name()) - require.NoError(t, err) - t.Cleanup(func() { - if err := os.RemoveAll(logPath); err != nil { - // log instead of error because it's likely this is going to fail on Windows for this test. - t.Logf("couldn't remove temp dir: %v", err) - } - }) - } else { - logPath = t.TempDir() - } - + logPath := lumberjackTempDir(t) countBefore := numFilesInDir(t, logPath, false) t.Logf("countBefore: %d", countBefore) ctx := TestCtx(t) fl, err := NewFileLogger(ctx, config, LevelTrace, "test", logPath, 0, nil) require.NoError(t, err) - defer func() { require.NoError(t, fl.Close()) }() + defer func() { + assert.NoError(t, fl.Close()) + // Wait for Lumberjack to finish its async log compression work + // we have no way of waiting for this to finish, or even stopping the millRun() process inside Lumberjack. + time.Sleep(time.Second) + }() fl.logf("test 1") countAfter1 := numFilesInDir(t, logPath, false) @@ -116,9 +106,6 @@ func TestLogRotationInterval(t *testing.T) { t.Logf("countAfter2: %d", countAfter2) assert.GreaterOrEqual(t, countAfter2, countAfterSleep) - // Wait for Lumberjack to finish its async log compression work - // we have no way of waiting for this to finish, or even stopping the millRun() process inside Lumberjack. - time.Sleep(time.Second) } // Benchmark the time it takes to write x bytes of data to a logger, and optionally rotate and compress it. 
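To make the new deletion pattern concrete, the fragment below shows how getDeletionDirAndRegexp behaves for an example path. It is a hypothetical test that would sit alongside the ones above in package base; the path and filenames are illustrative only. The returned regexp matches the active log file and its lumberjack-rotated backups, optionally compressed, and nothing else.

// Hypothetical example test (not in the patch), package base.
func TestDeletionPatternExample(t *testing.T) {
	dir, pattern := getDeletionDirAndRegexp("/var/log/sync_gateway/sg_info.log")

	assert.Equal(t, "/var/log/sync_gateway/", dir)

	// The active file and its lumberjack-rotated backups match...
	assert.Regexp(t, pattern, "sg_info.log")
	assert.Regexp(t, pattern, "sg_info-2024-08-21T12-00-00.000.log")
	assert.Regexp(t, pattern, "sg_info-2024-08-21T12-00-00.000.log.gz")

	// ...but files belonging to other loggers, or with extra suffixes, do not.
	assert.NotRegexp(t, pattern, "sg_warn.log")
	assert.NotRegexp(t, pattern, "sg_info.log.txt")
}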
@@ -331,3 +318,24 @@ func BenchmarkGetCallersName(b *testing.B) { }) } } + +// lumberjackTempDir returns a temporary directory like t.Tempdir() but safe for lumberjack logs +func lumberjackTempDir(t *testing.T) string { + if runtime.GOOS != "windows" { + return t.TempDir() + } + // On Windows, cleanup of t.TempDir() fails due to open log file handle from Lumberjack. Cannot be fixed from SG. + // https://github.com/natefinch/lumberjack/issues/185 + + // windows requires no slashes in the path name + pathRegex := regexp.MustCompile(`/|\\`) + logPath, err := os.MkdirTemp("", string(pathRegex.ReplaceAll([]byte(t.Name()), []byte("_")))) + require.NoError(t, err) + t.Cleanup(func() { + if err := os.RemoveAll(logPath); err != nil { + // log instead of error because it's likely this is going to fail on Windows for this test. + t.Logf("couldn't remove temp dir: %v, files: %s", err, getDirFiles(t, logPath)) + } + }) + return logPath +} diff --git a/docs/api/components/schemas.yaml b/docs/api/components/schemas.yaml index b28c41f806..a53f5110f5 100644 --- a/docs/api/components/schemas.yaml +++ b/docs/api/components/schemas.yaml @@ -2218,37 +2218,7 @@ Startup-config: logging: description: The configuration settings for modifying Sync Gateway logging. type: object - properties: - log_file_path: - description: Absolute or relative path on the filesystem to the log file directory. A relative path is from the directory that contains the Sync Gateway executable file. - type: string - readOnly: true - redaction_level: - description: Redaction level to apply to log output. - type: string - default: partial - enum: - - none - - partial - - full - - unset - readOnly: true - console: - $ref: '#/Console-logging-config' - error: - $ref: '#/File-logging-config' - warn: - $ref: '#/File-logging-config' - info: - $ref: '#/File-logging-config' - debug: - $ref: '#/File-logging-config' - trace: - $ref: '#/File-logging-config' - stats: - $ref: '#/File-logging-config' - audit: - $ref: '#/Audit-logging-config' + $ref: '#/Logging-config' auth: type: object properties: @@ -2369,65 +2339,171 @@ Runtime-config: type: object properties: logging: - description: The configuration settings for modifying Sync Gateway logging. - type: object - properties: - log_file_path: - description: Absolute or relative path on the filesystem to the log file directory. A relative path is from the directory that contains the Sync Gateway executable file. - type: string - readOnly: true - redaction_level: - description: Redaction level to apply to log output. - type: string - default: partial - enum: - - none - - partial - - full - - unset - readOnly: true - console: - $ref: '#/Console-logging-config' - error: - $ref: '#/File-logging-config' - warn: - $ref: '#/File-logging-config' - info: - $ref: '#/File-logging-config' - debug: - $ref: '#/File-logging-config' - trace: - $ref: '#/File-logging-config' - stats: - $ref: '#/File-logging-config' - audit: - $ref: '#/Audit-logging-config' + $ref: "#/Logging-config" max_concurrent_replications: description: Maximum number of concurrent replication connections allowed. If set to 0 this limit will be ignored. type: integer default: 0 title: Runtime-config -File-logging-config: +Logging-config: + type: object + properties: + log_file_path: + description: Absolute or relative path on the filesystem to the log file directory. A relative path is from the directory that contains the Sync Gateway executable file. 
+ type: string + readOnly: true + redaction_level: + description: Redaction level to apply to log output. + type: string + default: partial + enum: + - none + - partial + - full + - unset + readOnly: true + console: + $ref: '#/Console-logging-config' + error: + allOf: + - $ref: '#/File-logging-config-base' + - type: object + properties: + rotation: + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. + default: 360 + type: integer + collation_buffer_size: + description: The size of the log collation buffer. + default: 10 + type: integer + readOnly: true + warn: + allOf: + - $ref: '#/File-logging-config-base' + - type: object + properties: + rotation: + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. + default: 180 + type: integer + collation_buffer_size: + description: The size of the log collation buffer + default: 1000 + type: integer + readOnly: true + info: + allOf: + - $ref: '#/File-logging-config-base' + - type: object + properties: + rotation: + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. + default: 6 + type: integer + collation_buffer_size: + description: The size of the log collation buffer + default: 1000 + type: integer + readOnly: true + debug: + allOf: + - $ref: '#/File-logging-config-base' + - type: object + properties: + rotation: + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. + default: 2 + type: integer + collation_buffer_size: + description: The size of the log collation buffer + default: 0 + type: integer + readOnly: true + trace: + allOf: + - $ref: '#/File-logging-config-base' + - type: object + properties: + rotation: + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. + default: 2 + type: integer + collation_buffer_size: + description: The size of the log collation buffer + default: 0 + type: integer + readOnly: true + stats: + allOf: + - $ref: '#/File-logging-config-base' + - type: object + properties: + rotation: + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. + default: 6 + type: integer + collation_buffer_size: + description: The size of the log collation buffer + default: 0 + type: integer + readOnly: true + audit: + $ref: '#/Audit-logging-config' +File-logging-config-base: type: object properties: enabled: description: Toggle for this log output type: boolean - rotation: - $ref: '#/Log-rotation-config-readonly' - collation_buffer_size: - description: The size of the log collation buffer - type: integer - readOnly: true + default: false title: File-logging-config Audit-logging-config: + allOf: + - $ref: '#/File-logging-config-base' + - type: object + properties: + rotation: + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. By default, there is no rotation, max_age=0. 
+ default: 0 + type: integer type: object properties: - enabled: - description: Toggle for this log output - type: boolean - rotation: - $ref: '#/Log-rotation-config-readonly' audit_log_file_path: description: The path to write audit log files to type: string @@ -2439,21 +2515,14 @@ Audit-logging-config: type: number example: [1234, 5678] readOnly: true - collation_buffer_size: - description: The size of the log collation buffer - type: integer - readOnly: true title: Audit-logging-config -Log-rotation-config-readonly: +Log-rotation-base: type: object properties: max_size: description: The maximum size in MB of the log file before it gets rotated. type: integer default: 100 - max_age: - description: The maximum number of days to retain old log files. The defaults for this value are 360 days for error, 180 days for warn, 6 days for info, 2 days for debug, and 2 days for trace logs. - type: integer localtime: description: 'If true, it uses the computer''s local time to format the backup timestamp.' type: boolean @@ -2471,13 +2540,16 @@ Log-rotation-config-readonly: default: 0 readOnly: true title: Log-rotation-config +Log-rotation-config: + allOf: + - $ref: '#/Log-rotation-base' Console-logging-config: type: object properties: log_level: description: Log Level for the console output type: string - default: none + default: info enum: - none - error @@ -2493,6 +2565,7 @@ Console-logging-config: color_enabled: description: Log with color for the console output type: boolean + default: false readOnly: true file_output: description: 'Override the default stderr output, and write to the file specified instead' @@ -2501,12 +2574,21 @@ Console-logging-config: enabled: description: Toggle for this log output type: boolean + default: false readOnly: true rotation: - $ref: '#/Log-rotation-config-readonly' + allOf: + - $ref: '#/Log-rotation-base' + - type: object + properties: + max_age: + description: The maximum number of days to retain old log files. By default (max_age=0), rotated log files are not removed based on age. + default: 0 + type: integer collation_buffer_size: - description: The size of the log collation buffer. + description: The size of the log collation buffer. The default is 10 when the output is stderr, or 1000 when writing to a file. type: integer + default: 10 readOnly: true title: Console-logging-config Log-update-enabled:
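Taken together, the schema changes mean console logging can now carry its own rotation settings when file_output is set. The fragment below is an illustrative bootstrap config only: the field names follow the schema above, but the paths and values are examples, and the exact duration format for rotation_interval and the effective defaults should be confirmed against the Sync Gateway documentation. Note that, per this change, rotated console logs are not compressed, and cleanup of rotated files uses the same size-based rotated_logs_size_limit mechanism as the other file loggers.

{
  "logging": {
    "log_file_path": "/var/log/sync_gateway",
    "console": {
      "enabled": true,
      "log_level": "info",
      "log_keys": ["HTTP"],
      "file_output": "/var/log/sync_gateway/console.log",
      "rotation": {
        "max_size": 100,
        "max_age": 7,
        "rotation_interval": "24h"
      }
    }
  }
}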