// Copyright 2016-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stores
import (
"bufio"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/nats-streaming-server/logger"
"github.com/nats-io/nats-streaming-server/spb"
"github.com/nats-io/nats-streaming-server/util"
"github.com/nats-io/stan.go/pb"
)
const (
// Our file version.
fileVersion = 1
// Prefix for message log files
msgFilesPrefix = "msgs."
// Data files suffix
datSuffix = ".dat"
// Index files suffix
idxSuffix = ".idx"
// Backup file suffix
bakSuffix = ".bak"
// Name of the subscriptions file.
subsFileName = "subs" + datSuffix
// Name of the clients file.
clientsFileName = "clients" + datSuffix
// Name of the server file.
serverFileName = "server" + datSuffix
// Number of bytes required to store a CRC-32 checksum
crcSize = crc32.Size
// Size of a record header.
// 4 bytes: for typed records, 1 byte for the type and 3 bytes for the buffer size;
// for non-typed records, all 4 bytes hold the buffer size
// +4 bytes for CRC-32
recordHeaderSize = 4 + crcSize
// defaultBufSize is used for various buffered IO operations
defaultBufSize = 10 * 1024 * 1024
// Size of a message index record
// Seq - Offset - Timestamp - Size - CRC
msgIndexRecSize = 8 + 8 + 8 + 4 + crcSize
// msgRecordOverhead is the number of bytes to count toward the size
// of a serialized message so that the file slice size accounting is
// closer to the channel and/or file slice limits.
msgRecordOverhead = recordHeaderSize + msgIndexRecSize
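// As a worked example of the two sizes above (illustrative only):
// msgIndexRecSize = 8 + 8 + 8 + 4 + 4 = 32 bytes, and with
// recordHeaderSize = 4 + 4 = 8 bytes, msgRecordOverhead comes to
// 8 + 32 = 40 bytes counted on top of each serialized message.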
// Percentage of buffer usage to decide if the buffer should shrink
bufShrinkThreshold = 50
// Interval when to check/try to shrink buffer writers
defaultBufShrinkInterval = 5 * time.Second
// Interval an unused file slice is left opened
defaultSliceCloseInterval = time.Second
// If FileStoreOption's BufferSize is > 0, the buffer writer is initially
// created with this size (unless this is greater than BufferSize, in which
// case BufferSize is used). When possible, the buffer will shrink, but not
// below this value. This is for FileSubStore.
subBufMinShrinkSize = 128
// If FileStoreOption's BufferSize is > 0, the buffer writer is initially
// created with this size (unless this is greater than BufferSize, in which
// case BufferSize is used). When possible, the buffer will shrink, but not
// below this value. This is for FileMsgStore.
msgBufMinShrinkSize = 512
// This is the sleep time in the background tasks goroutine.
defaultBkgTasksSleepDuration = time.Second
// This is the default amount of time a message is cached.
defaultCacheTTL = time.Second
// defaultFileFlags are the default file flags used when opening a file
defaultFileFlags = os.O_RDWR | os.O_CREATE | os.O_APPEND
// Lock file name
lockFileName = ".rootdir.lck"
// Witness file for TruncateUnexpectedEOF option
truncateBadEOFFileName = ".truncate.lck"
)
// FileStoreOption is a function on the options for a File Store
type FileStoreOption func(*FileStoreOptions) error
// FileStoreOptions can be used to customize a File Store
type FileStoreOptions struct {
// BufferSize is the size of the buffer used during store operations.
BufferSize int
// CompactEnabled allows enabling/disabling file compaction.
CompactEnabled bool
// CompactInterval indicates the minimum interval (in seconds) between compactions.
CompactInterval int
// CompactFragmentation indicates the minimum ratio of fragmentation
// to trigger compaction. For instance, 50 means that compaction
// would not happen until fragmentation is more than 50%.
CompactFragmentation int
// CompactMinFileSize indicates the minimum file size before compaction
// can be performed, regardless of the current file fragmentation.
CompactMinFileSize int64
// DoCRC enables (or disables) CRC checksum verification on read operations.
DoCRC bool
// CRCPoly is a polynomial used to make the table used in CRC computation.
CRCPolynomial int64
// DoSync indicates if `File.Sync()` is called during a flush.
DoSync bool
// Regardless of channel limits, the options below allow a message log to
// be split into smaller file chunks. If all of these options are set to 0,
// a file slice limit is selected automatically based on the channel
// limits.
// SliceMaxMsgs defines how many messages can fit in a file slice (0 means
// count is not checked).
SliceMaxMsgs int
// SliceMaxBytes defines how many bytes can fit in a file slice, including
// the corresponding index file (0 means size is not checked).
SliceMaxBytes int64
// SliceMaxAge defines the period of time covered by a slice starting when
// the first message is stored (0 means time is not checked).
SliceMaxAge time.Duration
// SliceArchiveScript is the path to a script to be invoked when a file
// slice (and the corresponding index file) is going to be removed.
// The script will be invoked with the channel name and names of data and
// index files (which both have been previously renamed with a '.bak'
// extension). It is the responsibility of the script to move/remove
// those files.
SliceArchiveScript string
// FileDescriptorsLimit is a soft limit hinting at the FileStore to try
// to cap the number of concurrently opened files.
FileDescriptorsLimit int64
// Number of channels recovered in parallel (default is 1).
ParallelRecovery int
// TruncateUnexpectedEOF, when set to true, means that if recovery reports
// an error about an unexpected end of file, the last bad record will be
// removed (the file is truncated at the beginning of the first incomplete
// record). Data loss may occur.
TruncateUnexpectedEOF bool
// ReadBufferSize, if non zero, will cause the store to preload messages
// (up to this total size) when looking up a message. We expect that the
// client will be asking for the messages that follow in sequence, so this
// is a read-ahead optimization.
ReadBufferSize int
// AutoSync defines how often the store will flush and sync the files in
// the background. The default is set to 60 seconds.
// This is useful when a file sync is not desired for each Flush() call
// by setting DoSync to false.
// Setting AutoSync to any value <= 0 will disable auto sync.
AutoSync time.Duration
}
// This is an internal error to detect situations where we do
// not get an EOF but all the data we read is zeros. The file
// will be rewound to the previous position, which is then used
// as the first write position.
var errNeedRewind = errors.New("end of file padded with zeros")
// DefaultFileStoreOptions defines the default options for a File Store.
var DefaultFileStoreOptions = FileStoreOptions{
BufferSize: 2 * 1024 * 1024, // 2MB
CompactEnabled: true,
CompactInterval: 5 * 60, // 5 minutes
CompactFragmentation: 50,
CompactMinFileSize: 1024 * 1024,
DoCRC: true,
CRCPolynomial: int64(crc32.IEEE),
DoSync: true,
SliceMaxBytes: 64 * 1024 * 1024, // 64MB
ParallelRecovery: 1,
ReadBufferSize: 2 * 1024 * 1024, // 2MB
AutoSync: time.Minute,
}
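// A minimal usage sketch of the functional options pattern above
// (illustrative only; it assumes the NewFileStore constructor defined
// elsewhere in this package and caller-provided log, rootDir and limits):
//
//	fs, err := NewFileStore(log, rootDir, limits,
//		BufferSize(4*1024*1024),
//		DoSync(false),
//		AutoSync(30*time.Second))
//	if err != nil {
//		// handle error
//	}
//	defer fs.Close()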
// BufferSize is a FileStore option that sets the size of the buffer used
// during store writes. This can help improve write performance.
func BufferSize(size int) FileStoreOption {
return func(o *FileStoreOptions) error {
if size < 0 {
return fmt.Errorf("buffer size value must be a positive number")
}
o.BufferSize = size
return nil
}
}
// ReadBufferSize is a FileStore option that sets the size of the buffer used
// during store reads. This can help improve read performance.
func ReadBufferSize(size int) FileStoreOption {
return func(o *FileStoreOptions) error {
if size < 0 {
return fmt.Errorf("read buffer size value must be a positive number")
}
o.ReadBufferSize = size
return nil
}
}
// CompactEnabled is a FileStore option that enables or disables file compaction.
// The value false will disable compaction.
func CompactEnabled(enabled bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.CompactEnabled = enabled
return nil
}
}
// CompactInterval is a FileStore option that defines the minimum compaction interval.
// Compaction is not timer based; it is triggered when records get deleted. This value
// prevents compaction from happening too often.
func CompactInterval(seconds int) FileStoreOption {
return func(o *FileStoreOptions) error {
if seconds <= 0 {
return fmt.Errorf("compact interval value must at least be 1 seconds")
}
o.CompactInterval = seconds
return nil
}
}
// CompactFragmentation is a FileStore option that defines the fragmentation ratio
// below which compaction would not occur. For instance, specifying 50 means that
// if other variables would allow for compaction, the compaction would occur only
// after 50% of the file has data that is no longer valid.
func CompactFragmentation(fragmentation int) FileStoreOption {
return func(o *FileStoreOptions) error {
if fragmentation <= 0 {
return fmt.Errorf("compact fragmentation value must at least be 1")
}
o.CompactFragmentation = fragmentation
return nil
}
}
// CompactMinFileSize is a FileStore option that defines the minimum file size below
// which compaction would not occur. Specify `0` if you don't want any minimum.
func CompactMinFileSize(fileSize int64) FileStoreOption {
return func(o *FileStoreOptions) error {
if fileSize < 0 {
return fmt.Errorf("compact minimum file size value must be a positive number")
}
o.CompactMinFileSize = fileSize
return nil
}
}
// DoCRC is a FileStore option that defines if a CRC checksum verification should
// be performed when records are read from disk.
func DoCRC(enableCRC bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.DoCRC = enableCRC
return nil
}
}
// CRCPolynomial is a FileStore option that defines the polynomial to use to create
// the table used for CRC-32 Checksum.
// See https://golang.org/pkg/hash/crc32/#MakeTable
func CRCPolynomial(polynomial int64) FileStoreOption {
return func(o *FileStoreOptions) error {
if polynomial <= 0 || polynomial > int64(0xFFFFFFFF) {
return fmt.Errorf("crc polynomial should be between 1 and %v", int64(0xFFFFFFFF))
}
o.CRCPolynomial = polynomial
return nil
}
}
// DoSync is a FileStore option that defines if `File.Sync()` should be called
// during a `Flush()` call.
func DoSync(enableFileSync bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.DoSync = enableFileSync
return nil
}
}
// AutoSync is a FileStore option that defines how often each store is sync'ed on disk.
// Any value <= 0 will disable this feature.
func AutoSync(dur time.Duration) FileStoreOption {
return func(o *FileStoreOptions) error {
o.AutoSync = dur
return nil
}
}
// SliceConfig is a FileStore option that allows the configuration of
// file slice limits and optional archive script file name.
func SliceConfig(maxMsgs int, maxBytes int64, maxAge time.Duration, script string) FileStoreOption {
return func(o *FileStoreOptions) error {
if maxMsgs < 0 || maxBytes < 0 || maxAge < 0 {
return fmt.Errorf("slice max values must be positive numbers")
}
o.SliceMaxMsgs = maxMsgs
o.SliceMaxBytes = maxBytes
o.SliceMaxAge = maxAge
o.SliceArchiveScript = script
return nil
}
}
// FileDescriptorsLimit is a FileStore option that sets a soft limit hinting
// at the FileStore to try to cap the number of concurrently opened files.
func FileDescriptorsLimit(limit int64) FileStoreOption {
return func(o *FileStoreOptions) error {
if limit < 0 {
return fmt.Errorf("file descriptor limit must be a positive number")
}
o.FileDescriptorsLimit = limit
return nil
}
}
// ParallelRecovery is a FileStore option that allows the parallel
// recovery of channels. When running with SSDs, try to use a higher
// value than the default number of 1. When running with HDDs,
// performance may be better if it stays at 1.
func ParallelRecovery(count int) FileStoreOption {
return func(o *FileStoreOptions) error {
if count <= 0 {
return fmt.Errorf("parallel recovery value must be at least 1")
}
o.ParallelRecovery = count
return nil
}
}
// TruncateUnexpectedEOF indicates if, on recovery, the store should
// truncate a file that reports an unexpected end-of-file (EOF).
// If set to true, the invalid record byte content is printed but the store
// will truncate the file prior to this bad record and proceed with recovery.
// Data loss may occur.
func TruncateUnexpectedEOF(truncate bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.TruncateUnexpectedEOF = truncate
return nil
}
}
// AllOptions is a convenient option to pass all options from a FileStoreOptions
// structure to the constructor.
func AllOptions(opts *FileStoreOptions) FileStoreOption {
return func(o *FileStoreOptions) error {
if err := BufferSize(opts.BufferSize)(o); err != nil {
return err
}
if err := CompactInterval(opts.CompactInterval)(o); err != nil {
return err
}
if err := CompactFragmentation(opts.CompactFragmentation)(o); err != nil {
return err
}
if err := CompactMinFileSize(opts.CompactMinFileSize)(o); err != nil {
return err
}
if err := CRCPolynomial(opts.CRCPolynomial)(o); err != nil {
return err
}
if err := SliceConfig(opts.SliceMaxMsgs, opts.SliceMaxBytes, opts.SliceMaxAge, opts.SliceArchiveScript)(o); err != nil {
return err
}
if err := FileDescriptorsLimit(opts.FileDescriptorsLimit)(o); err != nil {
return err
}
if err := ParallelRecovery(opts.ParallelRecovery)(o); err != nil {
return err
}
if err := ReadBufferSize(opts.ReadBufferSize)(o); err != nil {
return err
}
if err := AutoSync(opts.AutoSync)(o); err != nil {
return err
}
o.CompactEnabled = opts.CompactEnabled
o.DoCRC = opts.DoCRC
o.DoSync = opts.DoSync
o.TruncateUnexpectedEOF = opts.TruncateUnexpectedEOF
return nil
}
}
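// A sketch of how AllOptions is typically used (illustrative only): start
// from a copy of DefaultFileStoreOptions, tweak individual fields, and pass
// the whole structure to the constructor:
//
//	opts := DefaultFileStoreOptions
//	opts.DoSync = false
//	opts.SliceMaxBytes = 32 * 1024 * 1024
//	fs, err := NewFileStore(log, rootDir, limits, AllOptions(&opts))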
// Type for the records in the subscriptions file
type recordType byte
// Protobufs do not share a common interface, yet when saving a
// record on disk, we have to get the size and marshal the record into
// a buffer. These methods are available in all the protobufs.
// So we create this interface with those two methods to be used by the
// writeRecord method.
type record interface {
Size() int
MarshalTo([]byte) (int, error)
}
// This is used for cases where the record is not typed
const recNoType = recordType(0)
// Record types for subscription file
const (
subRecNew = recordType(iota) + 1
subRecUpdate
subRecDel
subRecAck
subRecMsg
)
// Record types for client store
const (
addClient = recordType(iota) + 1
delClient
)
type fileID int64
type beforeFileClose func() error
const (
invalidFileID fileID = -1
fileOpened = int32(1)
fileInUse = int32(2)
fileClosing = int32(3)
fileClosed = int32(4)
fileRemoved = int32(5)
fmClosed = int32(6)
)
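// Illustrative summary of the lifecycle encoded by these states: a file is
// created in `fileInUse`, moves back and forth between `fileOpened` and
// `fileInUse` as the store acquires and releases it, goes from `fileOpened`
// to `fileClosing` and then `fileClosed` when the manager needs to free a
// descriptor, and ends in `fileRemoved` once deleted. `fmClosed` marks the
// files manager itself as closed.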
type file struct {
// Atomics need to be memory aligned. Put them first in the
// structure definition.
state int32
id fileID
handle *os.File
name string
flags int
beforeClose beforeFileClose
}
type filesManager struct {
sync.Mutex
openedFDs int64
limit int64
rootDir string
files map[fileID]*file
nextID fileID
isClosed bool
}
// FileStore is the storage interface for STAN servers, backed by files.
type FileStore struct {
genericStore
fm *filesManager
serverFile *file
clientsFile *file
opts FileStoreOptions
compactItvl time.Duration
clients map[string]*Client
delClientRec spb.ClientDelete
cliFileSize int64
cliDeleteRecs int // Number of deleted client records
cliCompactTS time.Time
crcTable *crc32.Table
lockFile util.LockFile
}
type subscription struct {
sub *spb.SubState
seqnos map[uint64]struct{}
}
type bufferedWriter struct {
buf *bufio.Writer
bufSize int // current buffer size
minShrinkSize int // minimum shrink size. Note that this can be bigger than maxSize (see newBufferWriter)
maxSize int // maximum size the buffer can grow
shrinkReq bool // used to decide if buffer should shrink
}
// FileSubStore is a subscription store in files.
type FileSubStore struct {
genericSubStore
fstore *FileStore
fm *filesManager
tmpSubBuf []byte
file *file
bw *bufferedWriter
delSub spb.SubStateDelete
updateSub spb.SubStateUpdate
opts *FileStoreOptions // points to options from FileStore
compactItvl time.Duration
fileSize int64
numRecs int // Number of records (sub and msgs)
delRecs int // Number of delete (or ack) records
compactTS time.Time
crcTable *crc32.Table // reference to the one from FileStore
activity bool // was there any write between two flush calls
writer io.Writer // this is either `bw` or `file`, depending on whether the buffer writer is used or not
shrinkTimer *time.Timer // timer associated with callback shrinking buffer when possible
syncTimer *time.Timer // timer associated with performing auto flush and disk sync
synced int64 // number of times the file is actually sync'ed
allDone sync.WaitGroup
}
// fileSlice represents one of the message store files (there are a number
// of files for a MsgStore on a given channel).
type fileSlice struct {
file *file
idxFile *file
firstSeq uint64
lastSeq uint64
rmCount int // Count of messages "removed" from the slice due to limits.
msgsCount int
msgsSize uint64
firstWrite int64 // Time the first message was added to this slice (used for slice age limit)
lastUsed int64
}
// msgIndex contains the message's offset in the data file, its timestamp
// and size, which allows quick recovery of messages and reconstruction of
// file slices. It also helps GetSequenceFromTimestamp by not having
// to recover actual messages to find out the correct message sequence
// based on timestamp.
type msgIndex struct {
offset int64
timestamp int64
msgSize uint32
}
// bufferedMsg is required to keep track of a message and msgRecord when
// file buffering is used. It is possible that a message and its index are
// not yet flushed to disk when the message gets removed from the store
// due to limits. We need a map that keeps a reference to the message and
// record until the file is flushed.
type bufferedMsg struct {
msg *pb.MsgProto
index *msgIndex
}
// cachedMsg is a structure that contains a reference to a message
// and cache expiration value. The cache has a map and list so
// that cached messages can be ordered by expiration time.
type cachedMsg struct {
expiration int64
msg *pb.MsgProto
prev *cachedMsg
next *cachedMsg
}
// msgsCache is the file store cache.
type msgsCache struct {
tryEvict int32
seqMaps map[uint64]*cachedMsg
head *cachedMsg
tail *cachedMsg
}
// FileMsgStore is a per channel message file store.
type FileMsgStore struct {
genericMsgStore
// Atomic operations require 64-bit aligned fields to be able
// to run with 32-bit processes.
checkSlices int64 // used with atomic operations
timeTick int64 // time captured in the background tasks goroutine
tmpMsgBuf []byte
fm *filesManager // shortcut to ms.fstore.fm
hasFDsLimit bool // shortcut to ms.fstore.opts.FileDescriptorsLimit > 0
bw *bufferedWriter
writer io.Writer // this is `bw.buf` or `file`, depending on whether the buffer writer is used or not
files map[int]*fileSlice
writeSlice *fileSlice
channelName string
firstFSlSeq int // First file slice sequence number
lastFSlSeq int // Last file slice sequence number
slCountLim int
slSizeLim uint64
slAgeLim int64
slHasLimits bool
fstore *FileStore // pointer to file store object
cache *msgsCache
wOffset int64
firstMsg *pb.MsgProto
lastMsg *pb.MsgProto
expiration int64
bufferedSeqs []uint64
bufferedMsgs map[uint64]*bufferedMsg
bkgTasksDone chan bool // signals the background tasks goroutine to stop
bkgTasksWake chan bool // signals the background tasks goroutine to get out of a sleep
allDone sync.WaitGroup
readBufSize int
needSync bool // this is required to reduce sync'ing when DoSync==false but AutoSync>0
synced int64 // number of times the file is actually sync'ed
}
type bufferPool struct {
p sync.Pool
}
// Get returns a pointer to a slice of at least `needed` capacity.
// For the reason why we use a pointer to a slice, see https://staticcheck.io/docs/checks#SA6002
func (bp *bufferPool) Get(needed int) *[]byte {
pBuf, _ := bp.p.Get().(*[]byte)
if pBuf != nil && cap(*pBuf) >= needed {
return pBuf
}
buf := make([]byte, needed)
return &buf
}
// Put returns the pointer to the slice back to the pool.
func (bp *bufferPool) Put(pBuf *[]byte) {
bp.p.Put(pBuf)
}
var fsReadBufPool = &bufferPool{}
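// Typical use of the pool (illustrative only, with `needed` standing for a
// caller-provided int): get a pointer to a buffer of at least the needed
// capacity, slice it to the exact length, and return the same pointer once
// done so the allocation can be reused:
//
//	pBuf := fsReadBufPool.Get(needed)
//	buf := (*pBuf)[:needed]
//	// ... fill and use buf ...
//	fsReadBufPool.Put(pBuf)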
// Some variables are based on constants but can be changed
// for test purposes.
var (
bufShrinkInterval = defaultBufShrinkInterval
bkgTaskMu sync.Mutex
bkgTaskRefs int
bkgTasksSleepDuration = defaultBkgTasksSleepDuration
cacheTTL = int64(defaultCacheTTL)
sliceCloseInterval = defaultSliceCloseInterval
fillGaps = true
)
// FileStoreTestSetBackgroundTaskInterval is used by tests to reduce the interval
// at which some tasks are performed in the background
func FileStoreTestSetBackgroundTaskInterval(wait time.Duration) {
// It is possible that both the server test package and the
// stores test package run in parallel. Ensure that only
// one is setting the value to avoid races.
bkgTaskMu.Lock()
if bkgTaskRefs == 0 {
bkgTasksSleepDuration = wait
}
bkgTaskRefs++
bkgTaskMu.Unlock()
}
// openFile opens the file specified by `filename`.
// If the file exists, it checks that the version is supported.
// The file is created if not present, opened in Read/Write and Append mode.
var openFile = func(fileName string) (*os.File, error) {
return openFileWithFlags(fileName, defaultFileFlags)
}
// openFileWithFlags opens the file specified by `fileName`, using
// `flags` as the open flags.
// If the file already exists, it checks that its version is supported.
// Otherwise, a new file is created (provided `flags` includes os.O_CREATE)
// and the current file version is written to it.
func openFileWithFlags(fileName string, flags int) (*os.File, error) {
checkVersion := false
// Check if file already exists
if s, err := os.Stat(fileName); s != nil && err == nil {
checkVersion = true
}
file, err := os.OpenFile(fileName, flags, 0666)
if err != nil {
return nil, err
}
if checkVersion {
err = checkFileVersion(file)
} else {
// This is a new file, write our file version
err = util.WriteInt(file, fileVersion)
}
if err != nil {
file.Close()
file = nil
}
return file, err
}
// check that the version of the file is understood by this interface
func checkFileVersion(r io.Reader) error {
fv, err := util.ReadInt(r)
if err != nil {
return fmt.Errorf("unable to verify file version: %v", err)
}
if fv == 0 || fv > fileVersion {
return fmt.Errorf("unsupported file version: %v (supports [1..%v])", fv, fileVersion)
}
return nil
}
// writeRecord writes a record to `w`.
// The record layout is as follows:
// 8 bytes:
// - 4 bytes for type and/or size combined
// - 4 bytes for CRC-32
//
// variable bytes: payload.
// If a buffer is provided, this function uses it and expands it if necessary.
// The function returns the buffer (possibly changed due to expansion) and the
// number of bytes written into that buffer.
func writeRecord(w io.Writer, buf []byte, recType recordType, rec record, recSize int, crcTable *crc32.Table) ([]byte, int, error) {
// This is the header + payload size
totalSize := recordHeaderSize + recSize
// Alloc or realloc as needed
buf = util.EnsureBufBigEnough(buf, totalSize)
// If there is a record type, encode it
headerFirstInt := 0
if recType != recNoType {
if recSize > 0xFFFFFF {
panic("record size too big")
}
// Encode the type in the high byte of the header
headerFirstInt = int(recType)<<24 | recSize
} else {
// The header is the size of the record
headerFirstInt = recSize
}
// Write the first part of the header at the beginning of the buffer
util.ByteOrder.PutUint32(buf[:4], uint32(headerFirstInt))
// Marshal the record into the given buffer, after the header offset
if _, err := rec.MarshalTo(buf[recordHeaderSize:totalSize]); err != nil {
// Return the buffer because the caller may have provided one
return buf, 0, err
}
// Compute CRC
crc := crc32.Checksum(buf[recordHeaderSize:totalSize], crcTable)
// Write it in the buffer
util.ByteOrder.PutUint32(buf[4:recordHeaderSize], crc)
// Are we dealing with a buffered writer?
bw, isBuffered := w.(*bufio.Writer)
// If so, and if what we are about to "write" is more than what's
// currently available in the buffer, then flush the buffer first.
// This is to reduce the risk of partial writes.
if isBuffered && (bw.Buffered() > 0) && (bw.Available() < totalSize) {
if err := bw.Flush(); err != nil {
return buf, 0, err
}
}
// Write the content of our slice into the writer `w`
if _, err := w.Write(buf[:totalSize]); err != nil {
// Return the tmpBuf because the caller may have provided one
return buf, 0, err
}
return buf, totalSize, nil
}
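// Worked example of the header encoding above (illustrative only): a typed
// record of type subRecNew (1) with a 300-byte payload produces a first
// header word of 1<<24|300 = 0x0100012C. The reverse decoding, as done by
// readRecord below, is:
//
//	headerFirstInt := int(subRecNew)<<24 | 300         // 0x0100012C
//	recType := recordType(headerFirstInt >> 24 & 0xFF) // subRecNew (1)
//	recSize := headerFirstInt & 0xFFFFFF               // 300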
// readRecord reads a record from `r`, possibly checking the CRC-32 checksum.
// When `buf` is not nil, this function ensures the buffer is big enough to
// hold the payload (expanding it if necessary). Therefore, this call always
// returns `buf`, regardless of whether there is an error or not.
// The caller indicates if the record is supposed to be typed or not.
func readRecord(r io.Reader, buf []byte, recTyped bool, crcTable *crc32.Table, checkCRC bool, expectedSize int) ([]byte, int, recordType, error) {
_header := [recordHeaderSize]byte{}
header := _header[:]
if _, err := io.ReadFull(r, header); err != nil {
return buf, 0, recNoType, err
}
recType := recNoType
recSize := 0
firstInt := int(util.ByteOrder.Uint32(header[:4]))
if recTyped {
recType = recordType(firstInt >> 24 & 0xFF)
recSize = firstInt & 0xFFFFFF
} else {
recSize = firstInt
}
if recSize == 0 && recType == 0 {
crc := util.ByteOrder.Uint32(header[4:recordHeaderSize])
if crc == 0 {
return buf, 0, 0, errNeedRewind
}
}
if expectedSize > 0 && recSize != expectedSize {
return buf, 0, 0, fmt.Errorf("expected record size to be %v bytes, got %v bytes", expectedSize, recSize)
}
// Now we are going to read the payload
buf = util.EnsureBufBigEnough(buf, recSize)
if _, err := io.ReadFull(r, buf[:recSize]); err != nil {
return buf, 0, recNoType, err
}
if checkCRC {
crc := util.ByteOrder.Uint32(header[4:recordHeaderSize])
// check CRC against what was stored
if c := crc32.Checksum(buf[:recSize], crcTable); c != crc {
return buf, 0, recNoType, fmt.Errorf("corrupted data, expected crc to be 0x%08x, got 0x%08x", crc, c)
}
}
return buf, recSize, recType, nil
}
// newBufferWriter sets the initial buffer size and keeps track of the min/max allowed sizes
func newBufferWriter(minShrinkSize, maxSize int) *bufferedWriter {
w := &bufferedWriter{minShrinkSize: minShrinkSize, maxSize: maxSize}
w.bufSize = minShrinkSize
// The minSize is the minimum size the buffer can shrink to.
// However, if the given max size is smaller than the min
// shrink size, use that instead.
if maxSize < minShrinkSize {
w.bufSize = maxSize
}
return w
}
// createNewWriter creates a new buffer writer for `file` with
// the bufferedWriter's current buffer size.
func (w *bufferedWriter) createNewWriter(file *os.File) io.Writer {
w.buf = bufio.NewWriterSize(file, w.bufSize)
return w.buf
}
// expand the buffer (first flushing the buffer if not empty)
func (w *bufferedWriter) expand(file *os.File, required int) (io.Writer, error) {
// If there was a request to shrink the buffer, cancel that.
w.shrinkReq = false
// If there was something, flush first
if w.buf.Buffered() > 0 {
if err := w.buf.Flush(); err != nil {
return w.buf, err
}
}
// Double the size
w.bufSize *= 2
// If still smaller than what is required, adjust
if w.bufSize < required {
w.bufSize = required
}
// But cap it.
if w.bufSize > w.maxSize {
w.bufSize = w.maxSize
}
w.buf = bufio.NewWriterSize(file, w.bufSize)
return w.buf, nil
}
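// A numeric trace of the growth policy above (illustrative only): with
// bufSize=512 and required=3000, doubling yields 1024, which is still
// smaller than required, so bufSize becomes 3000; had maxSize been 2048,
// the size would have been capped there instead.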
// tryShrinkBuffer checks and possibly shrinks the buffer
func (w *bufferedWriter) tryShrinkBuffer(file *os.File) (io.Writer, error) {
// Nothing to do if we are already at the lowest
// or file not set/opened.
if w.bufSize == w.minShrinkSize || file == nil {
return w.buf, nil
}
if !w.shrinkReq {
percentFilled := w.buf.Buffered() * 100 / w.bufSize
if percentFilled <= bufShrinkThreshold {
w.shrinkReq = true
}
// Wait for next tick to see if we can shrink
return w.buf, nil
}
if err := w.buf.Flush(); err != nil {
return w.buf, err
}
// Reduce size, but ensure it does not go below the limit
w.bufSize /= 2
if w.bufSize < w.minShrinkSize {
w.bufSize = w.minShrinkSize
}
w.buf = bufio.NewWriterSize(file, w.bufSize)
// Don't reset shrinkReq unless we are down to the limit
if w.bufSize == w.minShrinkSize {
w.shrinkReq = true
}
return w.buf, nil
}
// checkShrinkRequest checks how full the buffer is, and if it is above a
// certain threshold, cancels the shrink request
func (w *bufferedWriter) checkShrinkRequest() {
percentFilled := w.buf.Buffered() * 100 / w.bufSize
// If above the threshold, cancel the request.
if percentFilled > bufShrinkThreshold {
w.shrinkReq = false
}
}
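// The shrink logic is deliberately two-phased (illustrative trace): on one
// shrink-interval tick, a buffer filled at or below bufShrinkThreshold (50%)
// only records a shrink request; on the next tick, if intervening writes
// did not cause checkShrinkRequest to cancel it, the buffer is flushed and
// halved, never going below minShrinkSize.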
////////////////////////////////////////////////////////////////////////////
// filesManager methods
////////////////////////////////////////////////////////////////////////////
// createFilesManager returns an instance of the files manager.
func createFilesManager(rootDir string, openedFilesLimit int64) *filesManager {
fm := &filesManager{
rootDir: rootDir,
limit: openedFilesLimit,
files: make(map[fileID]*file),
}
return fm
}
// closeUnusedFiles closes files that are opened but not currently in use.
// The number of opened files is a soft limit: if this function is unable
// to close any file, the caller will still attempt to create/open the
// requested file. If the system's file descriptor limit is reached,
// opening the file will fail and that error will be returned to the caller.
// Lock is required on entry.
func (fm *filesManager) closeUnusedFiles(idToSkip fileID) {
for _, file := range fm.files {
if file.id == idToSkip {
continue
}
if atomic.CompareAndSwapInt32(&file.state, fileOpened, fileClosing) {
fm.doClose(file)
if fm.openedFDs < fm.limit {
break
}
}
}
}
// createFile creates a file, opens it, adds it to the list of files and
// returns an instance of `*file` with the state set to `fileInUse`.
// This call will possibly cause opened but unused files to be closed if the
// number of open file requests is above the set limit.
func (fm *filesManager) createFile(name string, flags int, bfc beforeFileClose) (*file, error) {
fm.Lock()
if fm.isClosed {
fm.Unlock()
return nil, fmt.Errorf("unable to create file %q, store is being closed", name)
}
if fm.limit > 0 && fm.openedFDs >= fm.limit {
fm.closeUnusedFiles(0)
}
fileName := filepath.Join(fm.rootDir, name)
handle, err := openFileWithFlags(fileName, flags)
if err != nil {
fm.Unlock()
return nil, err
}
fm.nextID++
newFile := &file{
state: fileInUse,
id: fm.nextID,
handle: handle,
name: fileName,
flags: flags,
beforeClose: bfc,
}
fm.files[newFile.id] = newFile
fm.openedFDs++
fm.Unlock()
return newFile, nil
}
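// A minimal sketch of driving the files manager (illustrative only; the
// file name and descriptor limit are hypothetical):
//
//	fm := createFilesManager(rootDir, 16) // soft limit of 16 opened files
//	f, err := fm.createFile("msgs.1"+datSuffix, defaultFileFlags, nil)
//	if err != nil {
//		// handle error
//	}
//	// f is returned in the fileInUse state; write through f.handle.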
// openFile opens the given file and sets its state to `fileInUse`.
// If the file manager has been closed or the file removed, this call
// returns an error.
// Otherwise, if the file's state is not `fileClosed` this call will panic.
// This call will possibly cause opened but unused files to be closed if the
// number of open file requests is above the set limit.
func (fm *filesManager) openFile(file *file) error {