-
Notifications
You must be signed in to change notification settings - Fork 0
/
sessionlog.go
485 lines (424 loc) · 12.9 KB
/
sessionlog.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
/*
Copyright 2017 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package events
import (
"archive/tar"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"syscall"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
log "github.com/sirupsen/logrus"
)
// SessionLogger is an interface that all session loggers must implement.
type SessionLogger interface {
	// LogEvent logs events associated with this session.
	LogEvent(fields EventFields) error
	// Close is called when clients close on the requested "session writer".
	// We ignore their requests because this writer (file) should be closed only
	// when the session logger is closed.
	Close() error
	// Finalize is called by the session when it's closing. This is where we're
	// releasing audit resources associated with the session.
	Finalize() error
	// PostSessionSlice posts a slice of session chunks for recording.
	PostSessionSlice(slice SessionSlice) error
}
// DiskSessionLoggerConfig sets up parameters for disk session logger
// associated with the session ID.
type DiskSessionLoggerConfig struct {
	// SessionID is the session id of the logger.
	SessionID session.ID
	// DataDir is the data directory for session events files.
	DataDir string
	// Clock is the clock replacement, used in tests.
	// NOTE(review): no default is set for it anywhere in this file — confirm
	// callers always supply one before relying on it.
	Clock clockwork.Clock
	// RecordSessions controls if sessions are recorded along with audit events.
	// When false, print chunks are counted but their payload is not stored.
	RecordSessions bool
	// Namespace is the logger namespace; it becomes part of the on-disk path.
	Namespace string
	// ServerID is a server ID; it becomes part of the on-disk path.
	ServerID string
}
// CheckAndSetDefaults checks the configuration and sets defaults.
// NOTE(review): currently a no-op — no fields are validated and no
// defaults (e.g. Clock) are filled in; callers must supply valid values.
func (cfg *DiskSessionLoggerConfig) CheckAndSetDefaults() error {
	return nil
}
// NewDiskSessionLogger creates a new disk based session logger. It opens
// (creating if needed) the session's index file under
// <DataDir>/<ServerID>/<SessionLogsDir>/<Namespace> in append mode.
func NewDiskSessionLogger(cfg DiskSessionLoggerConfig) (*DiskSessionLogger, error) {
	if err := cfg.CheckAndSetDefaults(); err != nil {
		return nil, trace.Wrap(err)
	}
	sessionDir := filepath.Join(cfg.DataDir, cfg.ServerID, SessionLogsDir, cfg.Namespace)
	// The index file records which events and chunks files belong to this
	// session; entries are appended as new files are rotated in.
	indexFile, err := os.OpenFile(
		filepath.Join(sessionDir, fmt.Sprintf("%v.index", cfg.SessionID.String())), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	sessionLogger := &DiskSessionLogger{
		DiskSessionLoggerConfig: cfg,
		Entry: log.WithFields(log.Fields{
			trace.Component: teleport.ComponentAuditLog,
			trace.ComponentFields: log.Fields{
				"sid": cfg.SessionID,
			},
		}),
		sessionDir: sessionDir,
		indexFile:  indexFile,
		// -1 guarantees the first chunk opens fresh events/chunks files
		// (see the invariant checks in writeChunk).
		lastEventIndex: -1,
		lastChunkIndex: -1,
	}
	return sessionLogger, nil
}
// DiskSessionLogger implements a disk based session logger. The important
// property of the disk based logger is that it never fails and can be used as
// a fallback implementation behind more sophisticated loggers.
type DiskSessionLogger struct {
	DiskSessionLoggerConfig
	*log.Entry
	// Mutex serializes writeChunk/finalize; PostSessionSlice and Finalize
	// take it before touching the files below.
	sync.Mutex
	// sid is never assigned in this file (SessionID from the embedded config
	// is used instead) — TODO(review): confirm it is not used elsewhere.
	sid session.ID
	// sessionDir is the directory holding all files for this session.
	sessionDir string
	// indexFile lists the events/chunks files belonging to the session.
	indexFile *os.File
	// eventsFile is the current gzip-compressed audit events file.
	eventsFile *gzipWriter
	// chunksFile is the current gzip-compressed session data file.
	chunksFile *gzipWriter
	// lastEventIndex is the last written global event index (-1 before any).
	lastEventIndex int64
	// lastChunkIndex is the last written global chunk index (-1 before any).
	lastChunkIndex int64
}
// LogEvent logs an event associated with this session.
//
// NOTE(review): intentionally unimplemented for the disk logger — events
// reach it through PostSessionSlice instead; calling LogEvent is treated
// as a programming error and panics.
func (sl *DiskSessionLogger) LogEvent(fields EventFields) error {
	panic("should not be used")
}
// Close is called when clients close on the requested "session writer".
// We ignore their requests because this writer (file) should be closed only
// when the session logger is closed (see Finalize).
func (sl *DiskSessionLogger) Close() error {
	return nil
}
// openFileForTar opens filename for reading and builds a tar header
// describing it (name, size, mode, mtime, and Unix uid/gid when
// available). The caller is responsible for closing the returned
// io.ReadCloser.
func openFileForTar(filename string) (*tar.Header, io.ReadCloser, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, nil, trace.ConvertSystemError(err)
	}
	// Stat the open handle rather than the path, so the header always
	// matches the file actually being read (the original stat-then-open
	// order raced with concurrent renames/replacements).
	fi, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, nil, trace.ConvertSystemError(err)
	}
	header := tar.Header{
		Name:    filepath.Base(filename),
		Size:    fi.Size(),
		Mode:    int64(fi.Mode()),
		ModTime: fi.ModTime(),
	}
	// Ownership info is only exposed on Unix-like platforms.
	if sys, ok := fi.Sys().(*syscall.Stat_t); ok {
		header.Uid = int(sys.Uid)
		header.Gid = int(sys.Gid)
	}
	return &header, f, nil
}
// Finalize is called by the session when it's closing. This is where we're
// releasing audit resources associated with the session.
func (sl *DiskSessionLogger) Finalize() error {
	sl.Lock()
	defer sl.Unlock()
	// finalize requires the logger lock to be held.
	return sl.finalize()
}
// flush forces buffered gzip frames out to the underlying files;
// without it, attempts to read partially written files could fail.
func (sl *DiskSessionLogger) flush() error {
	var errs []error
	if sl.RecordSessions && sl.chunksFile != nil {
		errs = append(errs, sl.chunksFile.Flush())
	}
	if sl.eventsFile != nil {
		errs = append(errs, sl.eventsFile.Flush())
	}
	return trace.NewAggregate(errs...)
}
// finalize releases audit resources associated with the session: it closes
// the index, chunks and events files and drops a "<sid>.completed" sentinel
// file so readers know the recording is finished.
//
// Callers must hold the logger lock. Failures are logged but never
// returned — finalize is best-effort and always reports success.
func (sl *DiskSessionLogger) finalize() error {
	auditOpenFiles.Dec()
	if sl.indexFile != nil {
		// Log the close error instead of silently discarding it, and use
		// sl.Warningf (not the package logger) so the sid field is attached,
		// consistent with the rest of this logger.
		if err := sl.indexFile.Close(); err != nil {
			sl.Warningf("Failed closing index file: %v.", err)
		}
	}
	if sl.chunksFile != nil {
		if err := sl.chunksFile.Close(); err != nil {
			sl.Warningf("Failed closing chunks file: %v.", err)
		}
	}
	if sl.eventsFile != nil {
		if err := sl.eventsFile.Close(); err != nil {
			sl.Warningf("Failed closing events file: %v.", err)
		}
	}
	// Create a sentinel file to signal completion of the recording.
	signalFile := filepath.Join(sl.sessionDir, fmt.Sprintf("%v.completed", sl.SessionID.String()))
	if err := ioutil.WriteFile(signalFile, []byte("completed"), 0640); err != nil {
		sl.Warningf("Failed creating signal file: %v.", err)
	}
	return nil
}
// eventsFileName builds the path of an events file: it consists of the
// session id and the first global event index recorded there, e.g.
// <dataDir>/<session-id>-<eventIndex>.events.gz.
func eventsFileName(dataDir string, sessionID session.ID, eventIndex int64) string {
	base := fmt.Sprintf("%v-%v.events.gz", sessionID.String(), eventIndex)
	return filepath.Join(dataDir, base)
}
// chunksFileName builds the path of a chunks file: it consists of the
// session id and the first global byte offset recorded there, e.g.
// <dataDir>/<session-id>-<offset>.chunks.gz.
func chunksFileName(dataDir string, sessionID session.ID, offset int64) string {
	base := fmt.Sprintf("%v-%v.chunks.gz", sessionID.String(), offset)
	return filepath.Join(dataDir, base)
}
// openEventsFile rotates to a new events file that begins at the given
// global event index: it closes the current events file (if any), records
// the new file name in the session index, and opens it for appending.
func (sl *DiskSessionLogger) openEventsFile(eventIndex int64) error {
	if sl.eventsFile != nil {
		if err := sl.eventsFile.Close(); err != nil {
			sl.Warningf("Failed to close file: %v", trace.DebugReport(err))
		}
		// Drop the stale reference so that a failure below can never leave
		// a closed writer in place for subsequent writes.
		sl.eventsFile = nil
	}
	eventsFileName := eventsFileName(sl.sessionDir, sl.SessionID, eventIndex)
	// Update the index file to record that a new events file has been created.
	data, err := json.Marshal(indexEntry{
		FileName: filepath.Base(eventsFileName),
		Type:     fileTypeEvents,
		Index:    eventIndex,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	if _, err := fmt.Fprintf(sl.indexFile, "%v\n", string(data)); err != nil {
		return trace.Wrap(err)
	}
	// Open the new events file for writing.
	file, err := os.OpenFile(eventsFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
	if err != nil {
		return trace.Wrap(err)
	}
	sl.eventsFile = newGzipWriter(file)
	return nil
}
// openChunksFile rotates to a new chunks file that begins at the given
// global byte offset: it closes the current chunks file (if any), records
// the new file name in the session index, and opens it for appending.
func (sl *DiskSessionLogger) openChunksFile(offset int64) error {
	if sl.chunksFile != nil {
		if err := sl.chunksFile.Close(); err != nil {
			sl.Warningf("Failed to close file: %v", trace.DebugReport(err))
		}
		// Drop the stale reference so that a failure below can never leave
		// a closed writer in place for subsequent writes.
		sl.chunksFile = nil
	}
	chunksFileName := chunksFileName(sl.sessionDir, sl.SessionID, offset)
	// Update the index file to record that a new chunks file has been created.
	data, err := json.Marshal(indexEntry{
		FileName: filepath.Base(chunksFileName),
		Type:     fileTypeChunks,
		Offset:   offset,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	if _, err := fmt.Fprintf(sl.indexFile, "%v\n", string(data)); err != nil {
		return trace.Wrap(err)
	}
	// Open the new chunks file for writing.
	file, err := os.OpenFile(chunksFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
	if err != nil {
		return trace.Wrap(err)
	}
	sl.chunksFile = newGzipWriter(file)
	return nil
}
// PostSessionSlice takes a series of events associated with the session
// and writes them to the events file and chunks file for future replays.
func (sl *DiskSessionLogger) PostSessionSlice(slice SessionSlice) error {
	sl.Lock()
	defer sl.Unlock()
	for _, chunk := range slice.Chunks {
		if _, err := sl.writeChunk(slice.SessionID, chunk); err != nil {
			return trace.Wrap(err)
		}
	}
	// Flush gzip frames so readers can see complete data immediately.
	return sl.flush()
}
// EventFromChunk returns an event converted from a session chunk: the
// chunk payload is decoded as JSON event fields, then stamped with the
// session id, event index, event time and event type from the chunk.
func EventFromChunk(sessionID string, chunk *SessionChunk) (EventFields, error) {
	var fields EventFields
	if err := json.Unmarshal(chunk.Data, &fields); err != nil {
		return nil, trace.Wrap(err)
	}
	fields[SessionEventID] = sessionID
	fields[EventIndex] = chunk.EventIndex
	// Chunk time is nanoseconds since epoch; normalize to UTC with
	// millisecond precision.
	fields[EventTime] = time.Unix(0, chunk.Time).In(time.UTC).Round(time.Millisecond)
	fields[EventType] = chunk.EventType
	return fields, nil
}
// writeChunk writes a single session chunk to disk. Every chunk is
// recorded as an audit event in the events file; print chunks additionally
// have their raw payload appended to the chunks file when session
// recording is enabled. Returns the number of bytes written, or -1 with an
// error. Callers must hold the logger lock.
func (sl *DiskSessionLogger) writeChunk(sessionID string, chunk *SessionChunk) (written int, err error) {
	// this section enforces the following invariant:
	// a single events file only contains successive events
	// (the -1 check forces the very first chunk to open a file)
	if sl.lastEventIndex == -1 || chunk.EventIndex-1 != sl.lastEventIndex {
		if err := sl.openEventsFile(chunk.EventIndex); err != nil {
			return -1, trace.Wrap(err)
		}
	}
	sl.lastEventIndex = chunk.EventIndex
	// chunk.Time is nanoseconds since epoch; store UTC, millisecond precision.
	eventStart := time.Unix(0, chunk.Time).In(time.UTC).Round(time.Millisecond)
	if chunk.EventType != SessionPrintEvent {
		// Non-print events carry a full JSON event payload in chunk.Data;
		// decode, stamp with session metadata, and append one line of JSON.
		fields, err := EventFromChunk(sessionID, chunk)
		if err != nil {
			return -1, trace.Wrap(err)
		}
		data, err := json.Marshal(fields)
		if err != nil {
			return -1, trace.Wrap(err)
		}
		return sl.eventsFile.Write(append(data, '\n'))
	}
	if !sl.RecordSessions {
		// Recording disabled: drop the payload but report its length as
		// written.
		return len(chunk.Data), nil
	}
	// this section enforces the following invariant:
	// a single chunks file only contains successive chunks
	if sl.lastChunkIndex == -1 || chunk.ChunkIndex-1 != sl.lastChunkIndex {
		if err := sl.openChunksFile(chunk.Offset); err != nil {
			return -1, trace.Wrap(err)
		}
	}
	sl.lastChunkIndex = chunk.ChunkIndex
	// Record a print event describing the payload in the events file...
	event := printEvent{
		Start:             eventStart,
		Type:              SessionPrintEvent,
		Bytes:             int64(len(chunk.Data)),
		DelayMilliseconds: chunk.Delay,
		Offset:            chunk.Offset,
		EventIndex:        chunk.EventIndex,
		ChunkIndex:        chunk.ChunkIndex,
	}
	bytes, err := json.Marshal(event)
	if err != nil {
		return -1, trace.Wrap(err)
	}
	_, err = sl.eventsFile.Write(append(bytes, '\n'))
	if err != nil {
		return -1, trace.Wrap(err)
	}
	// ...and the raw payload in the chunks file.
	return sl.chunksFile.Write(chunk.Data)
}
func diff(before, after time.Time) int64 {
d := int64(after.Sub(before) / time.Millisecond)
if d < 0 {
return 0
}
return d
}
const (
	// fileTypeChunks marks an index entry pointing at a chunks file
	// (session data written by writeChunk).
	fileTypeChunks = "chunks"
	// fileTypeEvents marks an index entry pointing at an events file
	// (newline-delimited JSON audit events).
	fileTypeEvents = "events"
)
type indexEntry struct {
FileName string `json:"file_name"`
Type string `json:"type"`
Index int64 `json:"index"`
Offset int64 `json:"offset,"`
authServer string
}
// printEvent is the JSON record written to the events file for every
// print (terminal output) chunk; it describes the payload stored in the
// chunks file without duplicating the payload itself.
type printEvent struct {
	// Start is the event start time (UTC, millisecond precision).
	Start time.Time `json:"time"`
	// Type is the event type (always SessionPrintEvent for this record).
	Type string `json:"event"`
	// Bytes is the length of the chunk payload in bytes.
	Bytes int64 `json:"bytes"`
	// DelayMilliseconds is the delay in milliseconds from the start of the session.
	DelayMilliseconds int64 `json:"ms"`
	// Offset is the offset in bytes in the session file.
	Offset int64 `json:"offset"`
	// EventIndex is the global event index.
	EventIndex int64 `json:"ei"`
	// ChunkIndex is the global chunk index.
	ChunkIndex int64 `json:"ci"`
}
// gzipWriter wraps a file with a gzip writer; Close closes both the gzip
// writer and the underlying file.
type gzipWriter struct {
	*gzip.Writer
	// file is the underlying file; nil after Close.
	file *os.File
}
// Close flushes and closes the gzip writer, returns it to the shared
// writer pool, then closes the underlying file. Safe to call more than
// once: both references are nilled after the first call.
func (f *gzipWriter) Close() error {
	var errs []error
	if w := f.Writer; w != nil {
		errs = append(errs, w.Close())
		// Detach from the file before pooling so the pooled writer holds
		// no reference to it.
		w.Reset(ioutil.Discard)
		writerPool.Put(w)
		f.Writer = nil
	}
	if file := f.file; file != nil {
		errs = append(errs, file.Close())
		f.file = nil
	}
	return trace.NewAggregate(errs...)
}
// writerPool is a sync.Pool for shared gzip writers.
// Each gzip writer allocates a lot of memory, so it makes sense to reset
// the writer and reuse the internal buffers to avoid too many objects on
// the heap.
var writerPool = sync.Pool{
	New: func() interface{} {
		// NewWriterLevel only fails for an invalid compression level;
		// gzip.BestSpeed is valid, so the error can be safely ignored.
		w, _ := gzip.NewWriterLevel(ioutil.Discard, gzip.BestSpeed)
		return w
	},
}
// newGzipWriter wraps file in a gzip writer taken from the shared pool;
// the writer is returned to the pool when the gzipWriter is closed.
func newGzipWriter(file *os.File) *gzipWriter {
	w := writerPool.Get().(*gzip.Writer)
	w.Reset(file)
	return &gzipWriter{Writer: w, file: file}
}
// gzipReader wraps a file with a gzip reader; Close closes both the gzip
// reader and the underlying file. (The original comment said "writer" —
// this type only reads.)
type gzipReader struct {
	io.ReadCloser
	// file is the underlying file; nil after Close.
	file io.Closer
}
// Close closes the gzip reader and the underlying file, aggregating any
// errors. Safe to call more than once: both references are nilled after
// the first call.
func (f *gzipReader) Close() error {
	var errs []error
	if rc := f.ReadCloser; rc != nil {
		errs = append(errs, rc.Close())
		f.ReadCloser = nil
	}
	if c := f.file; c != nil {
		errs = append(errs, c.Close())
		f.file = nil
	}
	return trace.NewAggregate(errs...)
}
// newGzipReader wraps file in a gzip reader. It fails if the file does
// not begin with a valid gzip header; on success the caller owns the
// returned reader and must Close it (which also closes the file).
func newGzipReader(file *os.File) (*gzipReader, error) {
	zr, err := gzip.NewReader(file)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &gzipReader{ReadCloser: zr, file: file}, nil
}