diff --git a/handler_test.go b/handler_test.go index c01d51c..8135569 100644 --- a/handler_test.go +++ b/handler_test.go @@ -39,4 +39,9 @@ func TestLevelFormatting(t *testing.T) { assert.True(t, lf.IsHandling(slog.InfoLevel)) assert.True(t, lf.IsHandling(slog.ErrorLevel)) assert.False(t, lf.IsHandling(slog.TraceLevel)) + + // test level mode + assert.Eq(t, "list", slog.LevelModeList.String()) + assert.Eq(t, "max", slog.LevelModeMax.String()) + assert.Eq(t, "unknown", slog.LevelMode(9).String()) } diff --git a/rotatefile/cleanup.go b/rotatefile/cleanup.go index 1504471..57a9ee9 100644 --- a/rotatefile/cleanup.go +++ b/rotatefile/cleanup.go @@ -2,42 +2,80 @@ package rotatefile import ( "os" + "sort" "time" + "github.com/gookit/goutil/errorx" "github.com/gookit/goutil/fsutil" ) +const defaultCheckInterval = 60 * time.Second + // CConfig struct for clean files type CConfig struct { // BackupNum max number for keep old files. // 0 is not limit, default is 20. BackupNum uint `json:"backup_num" yaml:"backup_num"` - // BackupTime max time for keep old files, unit is hours. + // BackupTime max time for keep old files, unit is TimeUnit. // // 0 is not limit, default is a week. BackupTime uint `json:"backup_time" yaml:"backup_time"` - // Compress determines if the rotated log files should be compressed - // using gzip. The default is not to perform compression. + // Compress determines if the rotated log files should be compressed using gzip. + // The default is not to perform compression. Compress bool `json:"compress" yaml:"compress"` - // FileDirs list - FileDirs []string `json:"file_dirs" yaml:"file_dirs"` - - // Patterns list. filename match patterns. eg: error.log.* + // Patterns dir path with filename match patterns. + // + // eg: ["/tmp/error.log.*", "/path/to/info.log.*", "/path/to/dir/*"] Patterns []string `json:"patterns" yaml:"patterns"` // TimeClock for clean files TimeClock Clocker - // ignore error - // TODO ignoreError bool + // TimeUnit for BackupTime. default is hours: time.Hour + TimeUnit time.Duration `json:"time_unit" yaml:"time_unit"` + + // CheckInterval for clean files on daemon run. default is 60s. + CheckInterval time.Duration `json:"check_interval" yaml:"check_interval"` + + // IgnoreError ignore remove error + // TODO IgnoreError bool + + // RotateMode for rotate split files TODO + // - copy+cut: copy contents then truncate file + // - rename : rename file(use for like PHP-FPM app) + // RotateMode RotateMode `json:"rotate_mode" yaml:"rotate_mode"` } -// AddFileDir for clean -func (c *CConfig) AddFileDir(dirs ...string) *CConfig { - c.FileDirs = append(c.FileDirs, dirs...) +// CConfigFunc for clean config +type CConfigFunc func(c *CConfig) + +// AddDirPath for clean, will auto append * for match all files +func (c *CConfig) AddDirPath(dirPaths ...string) *CConfig { + for _, dirPath := range dirPaths { + if !fsutil.IsDir(dirPath) { + continue + } + c.Patterns = append(c.Patterns, dirPath+"/*") + } + return c +} + +// AddPattern for clean. eg: "/tmp/error.log.*" +func (c *CConfig) AddPattern(patterns ...string) *CConfig { + c.Patterns = append(c.Patterns, patterns...) 
+ return c +} + +// WithConfigFn for custom settings +func (c *CConfig) WithConfigFn(fns ...CConfigFunc) *CConfig { + for _, fn := range fns { + if fn != nil { + fn(c) + } + } return c } @@ -47,37 +85,45 @@ func NewCConfig() *CConfig { BackupNum: DefaultBackNum, BackupTime: DefaultBackTime, TimeClock: DefaultTimeClockFn, + TimeUnit: time.Hour, + // check interval time + CheckInterval: defaultCheckInterval, } } // FilesClear multi files by time. TODO // use for rotate and clear other program produce log files type FilesClear struct { - // mu sync.Mutex + // mu sync.Mutex cfg *CConfig + // inited mark + inited bool - namePattern string - filepathDirs []string - // full file path glob patterns - filePatterns []string - // file max backup time. equals CConfig.BackupTime * time.Hour - backupDur time.Duration + // file max backup time. equals CConfig.BackupTime * CConfig.TimeUnit + backupDur time.Duration + quitDaemon chan struct{} } // NewFilesClear instance -func NewFilesClear(cfg *CConfig) *FilesClear { - if cfg == nil { - cfg = NewCConfig() - } +func NewFilesClear(fns ...CConfigFunc) *FilesClear { + cfg := NewCConfig().WithConfigFn(fns...) + return &FilesClear{cfg: cfg} +} - return &FilesClear{ - cfg: cfg, - } +// Config get +func (r *FilesClear) Config() *CConfig { + return r.cfg +} + +// WithConfig for custom set config +func (r *FilesClear) WithConfig(cfg *CConfig) *FilesClear { + r.cfg = cfg + return r } // WithConfigFn for custom settings -func (r *FilesClear) WithConfigFn(fn func(c *CConfig)) *FilesClear { - fn(r.cfg) +func (r *FilesClear) WithConfigFn(fns ...CConfigFunc) *FilesClear { + r.cfg.WithConfigFn(fns...) return r } @@ -87,119 +133,136 @@ func (r *FilesClear) WithConfigFn(fn func(c *CConfig)) *FilesClear { // --------------------------------------------------------------------------- // -const flushInterval = 60 * time.Second +// QuitDaemon for stop daemon clean +func (r *FilesClear) QuitDaemon() { + if r.quitDaemon == nil { + panic("cannot quit daemon, please call DaemonClean() first") + } + close(r.quitDaemon) +} -// CleanDaemon daemon clean old files by config -func (r *FilesClear) CleanDaemon() { +// DaemonClean daemon clean old files by config +// +// NOTE: this method will block current goroutine +// +// Usage: +// +// fc := rotatefile.NewFilesClear(nil) +// fc.WithConfigFn(func(c *rotatefile.CConfig) { +// c.AddDirPath("./testdata") +// }) +// +// wg := sync.WaitGroup{} +// wg.Add(1) +// +// // start daemon +// go fc.DaemonClean(func() { +// wg.Done() +// }) +// +// // wait for stop +// wg.Wait() +func (r *FilesClear) DaemonClean(onStop func()) { if r.cfg.BackupNum == 0 && r.cfg.BackupTime == 0 { - return + panic("clean: backupNum and backupTime are both 0") } - t := time.NewTicker(flushInterval) - for range t.C { - printErrln("files-clear: clean old files error:", r.Clean()) - } -} + r.quitDaemon = make(chan struct{}) + tk := time.NewTicker(r.cfg.CheckInterval) + defer tk.Stop() -// Clean old files by config -func (r *FilesClear) Clean() (err error) { - // clear by time, can also clean by number - if r.cfg.BackupTime > 0 { - cutTime := r.cfg.TimeClock.Now().Add(-r.backupDur) - for _, fileDir := range r.filepathDirs { - // eg: /tmp/ + error.log.* => /tmp/error.log.* - filePattern := fileDir + r.namePattern - - err = r.cleanByBackupTime(filePattern, cutTime) - if err != nil { - return err + for { + select { + case <-r.quitDaemon: + if onStop != nil { + onStop() } + return + case <-tk.C: // do cleaning + printErrln("files-clear: cleanup old files error:", r.Clean()) } + } 
+} - for _, pattern := range r.filePatterns { - err = r.cleanByBackupTime(pattern, cutTime) - if err != nil { - break - } - } +// Clean old files by config +func (r *FilesClear) prepare() { + if r.inited { return } + r.inited = true - if r.cfg.BackupNum == 0 { - return nil + // check backup time + if r.cfg.BackupTime > 0 { + r.backupDur = time.Duration(r.cfg.BackupTime) * r.cfg.TimeUnit } +} - // clear by number. - bckNum := int(r.cfg.BackupNum) - for _, fileDir := range r.filepathDirs { - pattern := fileDir + r.namePattern - if err = r.cleanByBackupNum(pattern, bckNum); err != nil { - return err - } +// Clean old files by config +func (r *FilesClear) Clean() error { + if r.cfg.BackupNum == 0 && r.cfg.BackupTime == 0 { + return errorx.Err("clean: backupNum and backupTime are both 0") } - for _, pattern := range r.filePatterns { - if err = r.cleanByBackupNum(pattern, bckNum); err != nil { - break + // clear by time, can also clean by number + for _, filePattern := range r.cfg.Patterns { + if err := r.cleanByPattern(filePattern); err != nil { + return err } } - return + return nil } -func (r *FilesClear) cleanByBackupNum(filePattern string, bckNum int) (err error) { - keepNum := 0 - return fsutil.GlobWithFunc(filePattern, func(oldFile string) error { - stat, err := os.Stat(oldFile) - if err != nil { - return err - } - - if stat.IsDir() { - return nil - } - - if keepNum < bckNum { - keepNum++ - } - - // remove expired files - return os.Remove(oldFile) - }) -} +// CleanByPattern clean files by pattern +func (r *FilesClear) cleanByPattern(filePattern string) (err error) { + r.prepare() -func (r *FilesClear) cleanByBackupTime(filePattern string, cutTime time.Time) (err error) { - oldFiles := make([]string, 0, 8) + oldFiles := make([]fileInfo, 0, 8) + cutTime := r.cfg.TimeClock.Now().Add(-r.backupDur) - // match all old rotate files. eg: /tmp/error.log.* + // find and clean expired files err = fsutil.GlobWithFunc(filePattern, func(filePath string) error { stat, err := os.Stat(filePath) if err != nil { return err } + // not handle subdir if stat.IsDir() { return nil } + // collect not expired if stat.ModTime().After(cutTime) { - oldFiles = append(oldFiles, filePath) + oldFiles = append(oldFiles, newFileInfo(filePath, stat)) return nil } - // remove expired files - return os.Remove(filePath) + // remove expired file + return r.remove(filePath) }) - // clear by number. - maxNum := int(r.cfg.BackupNum) - if maxNum > 0 && len(oldFiles) > maxNum { - for idx := 0; len(oldFiles) > maxNum; idx++ { - err = os.Remove(oldFiles[idx]) - if err != nil { + // clear by backup number. + backNum := int(r.cfg.BackupNum) + remNum := len(oldFiles) - backNum + + if backNum > 0 && remNum > 0 { + // sort by mod-time, oldest at first. 
+ sort.Sort(modTimeFInfos(oldFiles)) + + for idx := 0; idx < len(oldFiles); idx++ { + if err = r.remove(oldFiles[idx].Path()); err != nil { + break + } + + remNum-- + if remNum == 0 { break } } } - return } + +func (r *FilesClear) remove(filePath string) (err error) { + return os.Remove(filePath) +} diff --git a/rotatefile/cleanup_test.go b/rotatefile/cleanup_test.go index 306f5ed..0a54471 100644 --- a/rotatefile/cleanup_test.go +++ b/rotatefile/cleanup_test.go @@ -1,18 +1,127 @@ package rotatefile_test import ( + "fmt" + "os" + "sync" "testing" + "time" + "github.com/gookit/goutil" + "github.com/gookit/goutil/dump" + "github.com/gookit/goutil/fsutil" "github.com/gookit/goutil/testutil/assert" "github.com/gookit/slog/rotatefile" ) -func TestNewFilesClear(t *testing.T) { - fc := rotatefile.NewFilesClear(nil) +func TestFilesClear_Clean(t *testing.T) { + fc := rotatefile.NewFilesClear() + fc.WithConfig(rotatefile.NewCConfig()) fc.WithConfigFn(func(c *rotatefile.CConfig) { - c.AddFileDir("./testdata") + c.AddDirPath("testdata", "not-exist-dir") + c.BackupNum = 1 + c.BackupTime = 2 + c.TimeUnit = time.Second // for test }) - err := fc.Clean() + cfg := fc.Config() + assert.Eq(t, uint(1), cfg.BackupNum) + dump.P(cfg) + + // make files for clean + makeNum := 5 + makeWaitCleanFiles("file_clean.log", makeNum) + + // do clean + assert.NoErr(t, fc.Clean()) + + files := fsutil.Glob("testdata/file_clean.log.*") + dump.P(files) + assert.NotEmpty(t, files) + assert.Lt(t, len(files), makeNum) + + t.Run("error", func(t *testing.T) { + fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) { + c.BackupNum = 0 + c.BackupTime = 0 + }) + assert.Err(t, fc.Clean()) + }) +} + +func TestFilesClear_DaemonClean(t *testing.T) { + t.Run("panic", func(t *testing.T) { + fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) { + c.BackupNum = 0 + c.BackupTime = 0 + }) + assert.Panics(t, func() { + fc.QuitDaemon() + }) + assert.Panics(t, func() { + fc.DaemonClean(nil) + }) + }) + + fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) { + c.AddPattern("testdata/file_daemon_clean.*") + c.BackupNum = 2 + c.BackupTime = 3 + c.TimeUnit = time.Second // for test + c.CheckInterval = time.Second // for test + }) + + cfg := fc.Config() + dump.P(cfg) + + // make files for clean + makeNum := 5 + _, err := fsutil.PutContents("testdata/subdir/some.txt", "test data") assert.NoErr(t, err) + makeWaitCleanFiles("file_daemon_clean.log", makeNum) + + // test daemon clean + wg := sync.WaitGroup{} + wg.Add(1) + + // start daemon + go fc.DaemonClean(func() { + fmt.Println("daemon clean stopped") + wg.Done() + }) + + // stop daemon + go func() { + time.Sleep(time.Second * 1) + fmt.Println("stop daemon clean") + fc.QuitDaemon() + }() + + // wait for stop + wg.Wait() + + files := fsutil.Glob("testdata/file_daemon_clean.log.*") + dump.P(files) + assert.NotEmpty(t, files) + assert.Lt(t, len(files), makeNum) +} + +func makeWaitCleanFiles(nameTpl string, makeNum int) { + for i := 0; i < makeNum; i++ { + fpath := fmt.Sprintf("testdata/%s.%03d", nameTpl, i) + fmt.Println("make file:", fpath) + _, err := fsutil.PutContents(fpath, []byte("test contents ...")) + goutil.PanicErr(err) + time.Sleep(time.Second) + } + + fmt.Println("wait clean files:") + err := fsutil.GlobWithFunc("./testdata/"+nameTpl+".*", func(fpath string) error { + fi, err := os.Stat(fpath) + goutil.PanicErr(err) + + fmt.Printf(" %s => mtime: %s\n", fpath, fi.ModTime().Format("060102T15:04:05")) + return nil + }) + goutil.PanicErr(err) } diff --git 
a/rotatefile/testdata/.keep b/rotatefile/testdata/.keep deleted file mode 100644 index e69de29..0000000 diff --git a/rotatefile/writer.go b/rotatefile/writer.go index d3544a9..90806ef 100644 --- a/rotatefile/writer.go +++ b/rotatefile/writer.go @@ -2,6 +2,7 @@ package rotatefile import ( "fmt" + "io/fs" "os" "path" "path/filepath" @@ -132,6 +133,7 @@ func (d *Writer) Write(p []byte) (n int, err error) { return } + // update written size d.written += uint64(n) // rotate file @@ -272,7 +274,7 @@ func (d *Writer) asyncCleanBackups() { // Clean old files by config func (d *Writer) Clean() (err error) { if d.cfg.BackupNum == 0 && d.cfg.BackupTime == 0 { - return + return errorx.Err("clean: backupNum and backupTime are both 0") } // oldFiles: old xx.log.xx files, no gz file @@ -293,9 +295,9 @@ func (d *Writer) Clean() (err error) { gzNum := len(gzFiles) oldNum := len(oldFiles) maxNum := int(d.cfg.BackupNum) - rmNum := gzNum + oldNum - maxNum + remNum := gzNum + oldNum - maxNum - if rmNum > 0 { + if remNum > 0 { // remove old gz files if gzNum > 0 { sort.Sort(modTimeFInfos(gzFiles)) // sort by mod-time @@ -305,20 +307,21 @@ func (d *Writer) Clean() (err error) { break } - rmNum-- - if rmNum == 0 { + remNum-- + if remNum == 0 { break } } if err != nil { - return errorx.Wrap(err, "") + return errorx.Wrap(err, "remove old gz file error") } } // remove old log files - if rmNum > 0 && oldNum > 0 { - sort.Sort(modTimeFInfos(oldFiles)) // sort by mod-time + if remNum > 0 && oldNum > 0 { + // sort by mod-time, oldest at first. + sort.Sort(modTimeFInfos(oldFiles)) var idx int for idx = 0; idx < oldNum; idx++ { @@ -326,8 +329,8 @@ func (d *Writer) Clean() (err error) { break } - rmNum-- - if rmNum == 0 { + remNum-- + if remNum == 0 { break } } @@ -393,12 +396,18 @@ func (d *Writer) compressFiles(oldFiles []fileInfo) error { return nil } +// TODO replace to fsutil.FileInfo type fileInfo struct { - os.FileInfo + fs.FileInfo filePath string } -func newFileInfo(filePath string, fi os.FileInfo) fileInfo { +// Path get file full path. eg: "/path/to/file.go" +func (fi *fileInfo) Path() string { + return fi.filePath +} + +func newFileInfo(filePath string, fi fs.FileInfo) fileInfo { return fileInfo{filePath: filePath, FileInfo: fi} } diff --git a/rotatefile/writer_test.go b/rotatefile/writer_test.go index 97c424f..9a2471d 100644 --- a/rotatefile/writer_test.go +++ b/rotatefile/writer_test.go @@ -29,7 +29,7 @@ func ExampleNewWriter_on_other_logger() { } func TestNewWriter(t *testing.T) { - testFile := "testdata/test.log" + testFile := "testdata/test_writer.log" assert.NoErr(t, fsutil.DeleteIfExist(testFile)) w, err := rotatefile.NewConfig(testFile).Create() @@ -83,7 +83,7 @@ func TestWriter_Rotate_modeCreate(t *testing.T) { } func TestWriter_Clean(t *testing.T) { - logfile := "testdata/test_clean.log" + logfile := "testdata/writer_clean.log" c := rotatefile.NewConfig(logfile) c.MaxSize = 128
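For readers of this change, a minimal usage sketch of the new cleanup API it introduces (NewFilesClear with CConfigFunc options, Clean, DaemonClean/QuitDaemon). It assumes the github.com/gookit/slog/rotatefile import path used in the tests above; the "./logs" path and the retention numbers are illustrative placeholders, not values from this diff.

	package main

	import (
		"fmt"
		"time"

		"github.com/gookit/slog/rotatefile"
	)

	func main() {
		// build a files cleaner using the new CConfigFunc options
		fc := rotatefile.NewFilesClear(func(c *rotatefile.CConfig) {
			c.AddDirPath("./logs")             // scan a whole dir (appends "/*"); skipped if the dir is missing
			c.AddPattern("./logs/error.log.*") // or add an explicit glob pattern

			c.BackupNum = 20                   // keep at most 20 old files
			c.BackupTime = 15 * 24             // 15 days, since TimeUnit below is time.Hour
			c.TimeUnit = time.Hour             // unit for BackupTime
			c.CheckInterval = 30 * time.Minute // daemon check interval
		})

		// one-shot cleanup
		if err := fc.Clean(); err != nil {
			fmt.Println("clean error:", err)
		}

		// or run the cleaner as a daemon: DaemonClean blocks, QuitDaemon stops it
		go fc.DaemonClean(func() { fmt.Println("daemon clean stopped") })
		time.Sleep(2 * time.Second)
		fc.QuitDaemon()
		time.Sleep(100 * time.Millisecond) // give the onStop callback a moment to run
	}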