forked from ruiaylin/pgparser
/
sync_buffer.go
162 lines (144 loc) · 4.4 KB
/
sync_buffer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package log
import (
"bufio"
"context"
"os"
"sync/atomic"
"time"
"github.com/ruiaylin/pgparser/build"
"github.com/ruiaylin/pgparser/utils/timeutil"
)
// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
// file's Sync method and providing a wrapper for the Write method that provides log
// file rotation. There are conflicting methods, so the file cannot be embedded.
// l.mu is held for all its methods.
type syncBuffer struct {
	*bufio.Writer
	// logger is the owning logger; used to report fatal write errors
	// (exitLocked) and to supply rotation parameters (logDir, prefix).
	logger *loggerT
	// file is the currently open log file backing the embedded Writer.
	file *os.File
	// lastRotation is passed to and updated by create() on each rotation.
	lastRotation int64
	nbytes int64 // The number of bytes written to this file so far.
}
// Sync implements the flushSyncWriter interface by forcing the
// underlying file's contents to stable storage.
//
// Note: the other methods from flushSyncWriter (Flush, io.Writer) are
// implemented by the embedded *bufio.Writer directly.
func (sb *syncBuffer) Sync() error {
	err := sb.file.Sync()
	return err
}
// Write appends p to the current log file, rotating to a fresh file
// first when this write would push the file to LogFileMaxSize or
// beyond. A rotation or write failure is fatal to the logger via
// exitLocked. The caller must hold l.mu.
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
	// Project the file size after this write; rotate ahead of the
	// write if it would reach the configured maximum.
	projected := sb.nbytes + int64(len(p))
	if projected >= atomic.LoadInt64(&LogFileMaxSize) {
		if rerr := sb.rotateFile(timeutil.Now()); rerr != nil {
			sb.logger.exitLocked(rerr)
		}
	}
	n, err = sb.Writer.Write(p)
	sb.nbytes += int64(n)
	if err != nil {
		sb.logger.exitLocked(err)
	}
	return n, err
}
// createFile initializes the syncBuffer for a logger, and triggers
// creation of the log file.
// Assumes that l.mu is held by the caller.
func (l *loggerT) createFile() error {
	// Nothing to do when a file is already attached.
	if l.mu.file != nil {
		return nil
	}
	sb := &syncBuffer{logger: l}
	if err := sb.rotateFile(timeutil.Now()); err != nil {
		return err
	}
	l.mu.file = sb
	return nil
}
// rotateFile closes the syncBuffer's file and starts a new one.
//
// Sequence: flush and close the current file (if any); open the
// replacement via create(); optionally re-point the process' fd 2 at
// the new file; wrap the file in a large bufio.Writer; then write the
// standard header lines directly to the file (bypassing the new
// buffer); finally nudge the GC daemon via gcNotify.
// Assumes the logger's mutex is held by the caller (see syncBuffer).
func (sb *syncBuffer) rotateFile(now time.Time) error {
	if sb.file != nil {
		// Drain buffered records into the old file before closing it
		// so nothing is lost across the rotation.
		if err := sb.Flush(); err != nil {
			return err
		}
		if err := sb.file.Close(); err != nil {
			return err
		}
	}
	var err error
	// create() also returns a symlink-related value (third result),
	// unused here; lastRotation is threaded back in and updated.
	sb.file, sb.lastRotation, _, err = create(&sb.logger.logDir, sb.logger.prefix, now, sb.lastRotation)
	// Reset the byte count for the new file before checking the error
	// so the counter never carries over stale state.
	sb.nbytes = 0
	if err != nil {
		return err
	}
	// If this logger is responsible for capturing direct writes to the
	// process' file descriptor 2, then do it here.
	//
	// This captures e.g. all writes performed by internal
	// assertions in the Go runtime.
	if sb.logger.redirectInternalStderrWrites {
		// NB: any concurrent output to stderr may straddle the old and new
		// files. This doesn't apply to log messages as we won't reach this code
		// unless we're not logging to stderr.
		if err := hijackStderr(sb.file); err != nil {
			return err
		}
	}
	// bufferSize sizes the buffer associated with each log file. It's large
	// so that log records can accumulate without the logging thread blocking
	// on disk I/O. The flushDaemon will block instead.
	const bufferSize = 256 * 1024
	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
	// Capacity 6 covers the four fixed lines, the optional clusterID
	// line, and the trailing format line appended below.
	messages := make([]Entry, 0, 6)
	messages = append(messages,
		sb.logger.makeStartLine("file created at: %s", Safe(now.Format("2006/01/02 15:04:05"))),
		sb.logger.makeStartLine("running on machine: %s", host),
		sb.logger.makeStartLine("binary: %s", Safe(build.GetInfo().Short())),
		sb.logger.makeStartLine("arguments: %s", os.Args),
	)
	// logging.mu only guards the clusterID read; keep the critical
	// section to that single append.
	logging.mu.Lock()
	if logging.mu.clusterID != "" {
		messages = append(messages, sb.logger.makeStartLine("clusterID: %s", logging.mu.clusterID))
	}
	logging.mu.Unlock()
	// Including a non-ascii character in the first 1024 bytes of the log helps
	// viewers that attempt to guess the character encoding.
	messages = append(messages,
		sb.logger.makeStartLine("line format: [IWEF]yymmdd hh:mm:ss.uuuuuu goid file:line msg utf8=\u2713"))
	for _, entry := range messages {
		buf := logging.formatLogEntry(entry, nil, nil)
		var n int
		// Header lines are written straight to the file (not through
		// sb.Writer), but still count toward the rotation size.
		n, err = sb.file.Write(buf.Bytes())
		putBuffer(buf)
		sb.nbytes += int64(n)
		if err != nil {
			return err
		}
	}
	// Non-blocking notification: if the GC daemon is already pending,
	// dropping the signal is fine.
	select {
	case sb.logger.gcNotify <- struct{}{}:
	default:
	}
	return nil
}
// makeStartLine builds the log Entry for one of the header lines
// emitted at the top of every freshly created log file. The entry is
// tagged "config" and recorded at INFO severity.
func (l *loggerT) makeStartLine(format string, args ...interface{}) Entry {
	e := MakeEntry(
		context.Background(),
		Severity_INFO,
		nil, /* logCounter */
		2,   /* depth */
		l.redactableLogs.Get(),
		format,
		args...)
	e.Tags = "config"
	return e
}