Commit

👔 up: update some code logic and add more unit tests
inhere committed Apr 14, 2023
1 parent 2eac8d4 commit 1fac5f7
Showing 15 changed files with 129 additions and 67 deletions.
3 changes: 3 additions & 0 deletions handler/buffer_test.go
@@ -127,6 +127,9 @@ func TestLineBuffWriter(t *testing.T) {
h := handler.LineBuffWriter(file, 12, slog.AllLevels)
assert.NoErr(t, err)
assert.True(t, fsutil.IsFile(logfile))
assert.Panics(t, func() {
handler.LineBuffWriter(nil, 12, slog.AllLevels)
})

r := newLogRecord("Test LineBuffWriter")
err = h.Handle(r)
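
For context, a minimal sketch of wiring the LineBuffWriter handler into a logger (the file path and the buffer size 12 are illustrative; as the new assertion above checks, passing a nil writer panics):

package main

import (
	"os"

	"github.com/gookit/slog"
	"github.com/gookit/slog/handler"
)

func main() {
	f, err := os.Create("/tmp/line_buff_demo.log")
	if err != nil {
		panic(err)
	}

	h := handler.LineBuffWriter(f, 12, slog.AllLevels) // a nil writer would panic here
	l := slog.NewWithHandlers(h)

	l.Info("hello from LineBuffWriter")
	_ = l.Close() // flush buffered lines and close the handler
}
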
1 change: 0 additions & 1 deletion handler/buffer_wrapper.go
@@ -40,7 +40,6 @@ func (w *bufferWrapper) Flush() error {
if err := w.buffer.Flush(); err != nil {
return err
}

return w.handler.Flush()
}

38 changes: 19 additions & 19 deletions logger.go
@@ -2,7 +2,6 @@ package slog

import (
"context"
"os"
"sync"
"time"

@@ -78,7 +77,7 @@ func NewWithName(name string, fns ...LoggerFn) *Logger {
logger := &Logger{
name: name,
// exit handle
ExitFunc: os.Exit,
// ExitFunc: os.Exit,
PanicFunc: DefaultPanicFn,
exitHandlers: []func(){},
// options
@@ -124,7 +123,9 @@ func (l *Logger) Config(fns ...LoggerFn) *Logger {
return l
}

// Configure current logger
// Configure current logger.
//
// Deprecated: use Config()
func (l *Logger) Configure(fn LoggerFn) *Logger { return l.Config(fn) }

// FlushDaemon run flush handle on daemon
@@ -134,26 +135,29 @@ func (l *Logger) Configure(fn LoggerFn) *Logger { return l.Config(fn) }
// go slog.FlushDaemon()
func (l *Logger) FlushDaemon() {
for range time.NewTicker(flushInterval).C {
err := l.lockAndFlushAll()
printlnStderr("slog: daemon flush logs error: ", err)
if err := l.lockAndFlushAll(); err != nil {
printlnStderr("slog.FlushDaemon: daemon flush logs error: ", err)
}
}
}

// FlushTimeout flush logs on limit time.
//
// refer from glog package
func (l *Logger) FlushTimeout(timeout time.Duration) {
done := make(chan bool, 1)
go func() {
err := l.lockAndFlushAll()
printlnStderr("slog: flush logs error: ", err)
if err := l.lockAndFlushAll(); err != nil {
printlnStderr("slog.FlushTimeout: flush logs error: ", err)
}

done <- true
}()

select {
case <-done:
case <-time.After(timeout):
printlnStderr("slog: flush took longer than timeout:", timeout)
printlnStderr("slog.FlushTimeout: flush took longer than timeout:", timeout)
}
}

@@ -168,8 +172,7 @@ func (l *Logger) Flush() error { return l.lockAndFlushAll() }

// MustFlush flush logs. will panic on error
func (l *Logger) MustFlush() {
err := l.lockAndFlushAll()
if err != nil {
if err := l.lockAndFlushAll(); err != nil {
panic(err)
}
}
@@ -190,10 +193,9 @@ func (l *Logger) lockAndFlushAll() error {

// flush all without lock
func (l *Logger) flushAll() {
// Flush from fatal down, in case there's trouble flushing.
// flush from fatal down, in case there's trouble flushing.
_ = l.VisitAll(func(handler Handler) error {
err := handler.Flush()
if err != nil {
if err := handler.Flush(); err != nil {
l.err = err
printlnStderr("slog: call handler.Flush() error:", err)
}
@@ -204,9 +206,8 @@
// Close the logger
func (l *Logger) Close() error {
_ = l.VisitAll(func(handler Handler) error {
// Flush logs and then close
err := handler.Close()
if err != nil {
// flush logs and then close
if err := handler.Close(); err != nil {
l.err = err
printlnStderr("slog: call handler.Close() error:", err)
}
@@ -250,10 +251,9 @@ func (l *Logger) Exit(code int) {
// global exit handlers
runExitHandlers()

if l.ExitFunc == nil {
l.ExitFunc = os.Exit
if l.ExitFunc != nil {
l.ExitFunc(code)
}
l.ExitFunc(code)
}

// SetName for logger
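
As a usage sketch of the flush APIs touched above (using only calls that appear in this diff): FlushDaemon is meant to run in its own goroutine, with a final MustFlush before the process exits:

package main

import (
	"os"

	"github.com/gookit/slog"
	"github.com/gookit/slog/handler"
)

func main() {
	l := slog.NewWithName("app")
	l.SetHandlers([]slog.Handler{
		handler.NewIOWriter(os.Stdout, slog.AllLevels),
	})

	go l.FlushDaemon() // periodic flush; errors are printed to stderr and the loop keeps running

	l.Info("service started")

	l.MustFlush() // flush once more on shutdown; panics if flushing fails
}
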
12 changes: 12 additions & 0 deletions logger_test.go
@@ -177,6 +177,18 @@ func TestLogger_logf_allLevel(t *testing.T) {
printfAllLevelLogs(l, "this a log %s", "message")
}

func TestLogger_write_error(t *testing.T) {
h := newTestHandler()
h.errOnHandle = true

l := slog.NewWithHandlers(h)
l.Info("a message")

err := l.LastErr()
assert.Err(t, err)
assert.Eq(t, "handle error", err.Error())
}

func newLogger() *slog.Logger {
return slog.NewWithConfig(func(l *slog.Logger) {
l.ReportCaller = true
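
The new test checks that a handler error surfaces through Logger.LastErr; a sketch of that pattern in application code (newTestHandler is test-local, so an ordinary IO-writer handler stands in here):

package main

import (
	"fmt"
	"os"

	"github.com/gookit/slog"
	"github.com/gookit/slog/handler"
)

func main() {
	l := slog.NewWithHandlers(handler.NewIOWriter(os.Stdout, slog.AllLevels))
	l.Info("a message")

	// handle/write errors are recorded on the logger instead of being returned by Info()
	if err := l.LastErr(); err != nil {
		fmt.Println("log write failed:", err)
	}
}
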
27 changes: 18 additions & 9 deletions processor.go
@@ -29,9 +29,9 @@ func (fn ProcessorFunc) Process(record *Record) {

// ProcessableHandler interface
type ProcessableHandler interface {
// AddProcessor add an processor
// AddProcessor add a processor
AddProcessor(Processor)
// ProcessRecord handle an record
// ProcessRecord handle a record
ProcessRecord(record *Record)
}

@@ -60,7 +60,6 @@ func (p *Processable) ProcessRecord(r *Record) {
// AddHostname to record
func AddHostname() Processor {
hostname, _ := os.Hostname()

return ProcessorFunc(func(record *Record) {
record.AddField("hostname", hostname)
})
@@ -71,12 +70,7 @@ func AddUniqueID(fieldName string) Processor {
hs := md5.New()

return ProcessorFunc(func(record *Record) {
rb, err := strutil.RandomBytes(32)
if err != nil {
record.WithError(err)
return
}

rb, _ := strutil.RandomBytes(32)
hs.Write(rb)
randomID := hex.EncodeToString(hs.Sum(nil))
hs.Reset()
@@ -91,3 +85,18 @@ var MemoryUsage ProcessorFunc = func(record *Record) {
runtime.ReadMemStats(stat)
record.SetExtraValue("memoryUsage", stat.Alloc)
}

// AppendCtxKeys append context keys to record.Fields
func AppendCtxKeys(keys ...string) Processor {
return ProcessorFunc(func(record *Record) {
if record.Ctx == nil {
return
}

for _, key := range keys {
if val := record.Ctx.Value(key); val != nil {
record.AddField(key, record.Ctx.Value(key))
}
}
})
}
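
A minimal usage sketch of the new AppendCtxKeys processor (the traceId and userId keys are illustrative; values are read from the record's context, so the logger must be given a context via WithCtx):

package main

import (
	"context"
	"os"

	"github.com/gookit/slog"
	"github.com/gookit/slog/handler"
)

func main() {
	l := slog.NewWithHandlers(handler.NewIOWriter(os.Stdout, slog.AllLevels))
	l.AddProcessors(slog.AppendCtxKeys("traceId", "userId"))

	ctx := context.WithValue(context.Background(), "traceId", "trace-123")

	// the processor copies the configured context values into the record fields
	l.WithCtx(ctx).Info("request handled")
}
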
22 changes: 19 additions & 3 deletions processor_test.go
@@ -1,17 +1,18 @@
package slog_test

import (
"bytes"
"context"
"fmt"
"os"
"testing"

"github.com/gookit/goutil/byteutil"
"github.com/gookit/goutil/testutil/assert"
"github.com/gookit/slog"
)

func TestLogger_AddProcessor(t *testing.T) {
buf := new(bytes.Buffer)
buf := new(byteutil.Buffer)

l := slog.NewJSONSugared(buf, slog.InfoLevel)
l.AddProcessor(slog.AddHostname())
@@ -43,7 +44,22 @@ func TestLogger_AddProcessor(t *testing.T) {
buf.Reset()
assert.Contains(t, str, `"message":"message3"`)
assert.Contains(t, str, `"requestId":`)
fmt.Println(str)
fmt.Print(str)

l.ResetProcessors()
l.AddProcessors(slog.AppendCtxKeys("traceId", "userId"))
l.Info("message4")
str = buf.ResetAndGet()
fmt.Print(str)
assert.Contains(t, str, `"message":"message4"`)
assert.NotContains(t, str, `"traceId"`)

ctx := context.WithValue(context.Background(), "traceId", "traceId123abc456")
l.WithCtx(ctx).Info("message5")
str = buf.ResetAndGet()
fmt.Print(str)
assert.Contains(t, str, `"message":"message5"`)
assert.Contains(t, str, `"traceId":"traceId123abc456"`)
}

func TestProcessable_AddProcessor(t *testing.T) {
7 changes: 5 additions & 2 deletions record.go
@@ -79,6 +79,9 @@ func (r *Record) WithTime(t time.Time) *Record {
return nr
}

// WithCtx on record
func (r *Record) WithCtx(ctx context.Context) *Record { return r.WithContext(ctx) }

// WithContext on record
func (r *Record) WithContext(ctx context.Context) *Record {
nr := r.Copy()
@@ -294,8 +297,8 @@ func (r *Record) SetFields(fields M) *Record {

func (r *Record) log(level Level, args []any) {
r.Level = level
// will reduce memory allocation once
// r.Message = strutil.Byte2str(formatArgsWithSpaces(args))

// r.Message = strutil.Byte2str(formatArgsWithSpaces(args)) // will reduce memory allocation once
r.Message = formatArgsWithSpaces(args)
// r.logWrite(level)
r.logger.writeRecord(level, r)
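
WithCtx is a short alias for WithContext on Record; a small sketch of how it is used (the handler choice is illustrative):

package main

import (
	"context"
	"os"

	"github.com/gookit/slog"
	"github.com/gookit/slog/handler"
)

func main() {
	l := slog.NewWithHandlers(handler.NewIOWriter(os.Stdout, slog.AllLevels))

	r := l.Record()
	// WithCtx returns a copy of the record carrying the context, same as WithContext
	r.WithCtx(context.Background()).Info("info message")
}
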
8 changes: 6 additions & 2 deletions record_test.go
@@ -87,15 +87,17 @@ func TestRecord_SetContext(t *testing.T) {
w := newBuffer()
l := slog.NewWithConfig(func(l *slog.Logger) {
l.DoNothingOnPanicFatal()
}).Config(func(l *slog.Logger) {
l.CallerFlag = slog.CallerFlagPkg
})
l.SetHandlers([]slog.Handler{
handler.NewIOWriter(w, slog.AllLevels),
})

r := l.Record()
r.SetContext(context.Background()).Info("info message")
r.WithContext(context.Background()).Debug("debug message")
r.SetCtx(context.Background()).Info("info message")
r.WithCtx(context.Background()).Debug("debug message")

s := w.StringReset()
fmt.Print(s)
assert.Contains(t, s, "github.com/gookit/slog_test")
@@ -154,8 +156,10 @@ func TestRecord_AddFields(t *testing.T) {
func TestRecord_SetFields(t *testing.T) {
r := newLogRecord("AddFields")

r.SetTime(timex.Now().Yesterday().T())
r.SetFields(slog.M{"f1": "hi", "env": "prod"})
assert.NotEmpty(t, r.Fields)
assert.NotEmpty(t, r.Time)
}

func TestRecord_allLevel(t *testing.T) {
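
For reference, a sketch of the config chaining the updated test relies on (NewWithConfig and Config both take LoggerFn callbacks; CallerFlagPkg reports the caller as a package path):

package main

import "github.com/gookit/slog"

func main() {
	l := slog.NewWithConfig(func(l *slog.Logger) {
		l.ReportCaller = true
		l.CallerFlag = slog.CallerFlagPkg
	}).Config(func(l *slog.Logger) {
		l.DoNothingOnPanicFatal() // useful in tests: skip real panic/exit on Panic/Fatal levels
	})

	l.Info("configured logger")
}
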
22 changes: 10 additions & 12 deletions rotatefile/clean_files.go
@@ -3,6 +3,8 @@ package rotatefile
import (
"os"
"time"

"github.com/gookit/goutil/fsutil"
)

// CConfig struct for clean files
@@ -83,16 +85,15 @@ func (r *FilesClear) WithConfigFn(fn func(c *CConfig)) *FilesClear {

const flushInterval = 60 * time.Second

// DaemonClean daemon clean old files by config
func (r *FilesClear) DaemonClean() {
// CleanDaemon daemon clean old files by config
func (r *FilesClear) CleanDaemon() {
if r.cfg.BackupNum == 0 && r.cfg.BackupTime == 0 {
return
}

for range time.NewTicker(flushInterval).C {
err := r.Clean()
if err != nil {
printErrln("files-clear: clean files error:", err)
if err := r.Clean(); err != nil {
printErrln("files-clear: clean old files error:", err)
}
}
}
Expand Down Expand Up @@ -129,16 +130,13 @@ func (r *FilesClear) Clean() (err error) {
bckNum := int(r.cfg.BackupNum)
for _, fileDir := range r.filepathDirs {
pattern := fileDir + r.namePattern

err = r.cleanByBackupNum(pattern, bckNum)
if err != nil {
if err = r.cleanByBackupNum(pattern, bckNum); err != nil {
return err
}
}

for _, pattern := range r.filePatterns {
err = r.cleanByBackupNum(pattern, bckNum)
if err != nil {
if err = r.cleanByBackupNum(pattern, bckNum); err != nil {
break
}
}
@@ -147,7 +145,7 @@

func (r *FilesClear) cleanByBackupNum(filePattern string, bckNum int) (err error) {
keepNum := 0
err = globWithFunc(filePattern, func(oldFile string) error {
err = fsutil.GlobWithFunc(filePattern, func(oldFile string) error {
stat, err := os.Stat(oldFile)
if err != nil {
return err
@@ -172,7 +170,7 @@ func (r *FilesClear) cleanByBackupTime(filePattern string, cutTime time.Time) (e
oldFiles := make([]string, 0, 8)

// match all old rotate files. eg: /tmp/error.log.*
err = globWithFunc(filePattern, func(filePath string) error {
err = fsutil.GlobWithFunc(filePattern, func(filePath string) error {
stat, err := os.Stat(filePath)
if err != nil {
return err
16 changes: 0 additions & 16 deletions rotatefile/util.go
@@ -4,7 +4,6 @@ import (
"compress/gzip"
"io"
"os"
"path/filepath"

"github.com/gookit/goutil/fsutil"
)
@@ -42,21 +41,6 @@ func compressFile(srcPath, dstPath string) error {
return zw.Close()
}

func globWithFunc(pattern string, fn func(filePath string) error) (err error) {
files, err := filepath.Glob(pattern)
if err != nil {
return err
}

for _, filePath := range files {
err = fn(filePath)
if err != nil {
break
}
}
return
}

type filterFunc func(fPath string, fi os.FileInfo) bool
type handleFunc func(fPath string, fi os.FileInfo) error

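
The deleted local helper is replaced by fsutil.GlobWithFunc from gookit/goutil; a sketch of the call shape used by the clean-up code (the glob pattern is illustrative, and the abort-on-error behavior is assumed to match the removed helper):

package main

import (
	"fmt"
	"os"

	"github.com/gookit/goutil/fsutil"
)

func main() {
	err := fsutil.GlobWithFunc("/tmp/app.log.*", func(filePath string) error {
		st, err := os.Stat(filePath)
		if err != nil {
			return err // an error from the callback is propagated to the caller
		}
		fmt.Println(filePath, st.ModTime())
		return nil
	})
	if err != nil {
		fmt.Println("glob error:", err)
	}
}
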
0 comments on commit 1fac5f7
