-
Notifications
You must be signed in to change notification settings - Fork 453
/
decoder.go
741 lines (653 loc) · 21.5 KB
/
decoder.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
// Copyright (c) 2016 Uber Technologies, Inc
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE
package msgpack
import (
"errors"
"fmt"
"io"
"github.com/m3db/m3/src/dbnode/persist"
"github.com/m3db/m3/src/dbnode/persist/schema"
"github.com/m3db/m3/src/x/pool"
"gopkg.in/vmihailenco/msgpack.v2"
)
var (
	// Zero values returned when a decode fails partway through an object.
	emptyIndexInfo              schema.IndexInfo
	emptyIndexSummariesInfo     schema.IndexSummariesInfo
	emptyIndexBloomFilterInfo   schema.IndexBloomFilterInfo
	emptyIndexEntry             schema.IndexEntry
	emptyIndexSummary           schema.IndexSummary
	emptyIndexSummaryToken      IndexSummaryToken
	emptyLogInfo                schema.LogInfo
	emptyLogEntry               schema.LogEntry
	emptyLogMetadata            schema.LogMetadata
	emptyLogEntryRemainingToken DecodeLogEntryRemainingToken

	// Sentinel errors; compared by identity, so the text is informational only.
	errorUnableToDetermineNumFieldsToSkip = errors.New("unable to determine num fields to skip")
	// NOTE: fixed typo "with out" -> "without" in the message below.
	errorCalledDecodeBytesWithoutByteStreamDecoder = errors.New("called decodeBytes without byte stream decoder")
	errorIndexEntryChecksumMismatch                = errors.New("decode index entry encountered checksum mismatch")
)
// Decoder decodes persisted msgpack-encoded data.
type Decoder struct {
	// reader is the stream currently being decoded from (set via Reset).
	reader DecoderStream
	// Will only be set if the Decoder is Reset() with a DecoderStream
	// that also implements ByteStream.
	byteReader ByteStream
	// Wraps original reader with reader that can calculate digest. Digest calculation must be enabled,
	// otherwise it defaults to off.
	readerWithDigest *decoderStreamWithDigest
	// dec is the underlying msgpack decoder driving all primitive decodes.
	dec *msgpack.Decoder
	// err holds the first error encountered; once set, subsequent decode
	// calls short-circuit and return zero values.
	err error
	// allocDecodedBytes selects whether decodeBytes allocates fresh slices
	// (via the msgpack library) or aliases the underlying byte stream.
	allocDecodedBytes bool
	// legacy carries version overrides used to decode older file formats
	// (and to test forwards compatibility).
	legacy legacyEncodingOptions
}
// NewDecoder creates a new decoder using the default legacy encoding
// options (i.e. the current on-disk format versions).
func NewDecoder(opts DecodingOptions) *Decoder {
	return newDecoder(defaultlegacyEncodingOptions, opts)
}
// newDecoder constructs a Decoder with explicit legacy encoding options,
// falling back to the default DecodingOptions when opts is nil.
func newDecoder(legacy legacyEncodingOptions, opts DecodingOptions) *Decoder {
	if opts == nil {
		opts = NewDecodingOptions()
	}
	stream := NewByteDecoderStream(nil)
	d := &Decoder{
		reader:            stream,
		dec:               msgpack.NewDecoder(stream),
		legacy:            legacy,
		allocDecodedBytes: opts.AllocDecodedBytes(),
		readerWithDigest:  newDecoderStreamWithDigest(nil),
	}
	return d
}
// Reset resets the data stream to decode from and clears any sticky error.
func (dec *Decoder) Reset(stream DecoderStream) {
	dec.reader = stream
	// Cache the ByteStream view (if any) once up front so later decode
	// calls don't repeat the type assertion; the comma-ok form yields nil
	// when the stream doesn't implement ByteStream.
	byteStream, _ := stream.(ByteStream)
	dec.byteReader = byteStream
	dec.readerWithDigest.reset(dec.reader)
	dec.dec.Reset(dec.readerWithDigest)
	dec.err = nil
}
// DecodeIndexInfo decodes the index info root object.
func (dec *Decoder) DecodeIndexInfo() (schema.IndexInfo, error) {
	if dec.err != nil {
		return emptyIndexInfo, dec.err
	}
	_, numToSkip := dec.decodeRootObject(indexInfoVersion, indexInfoType)
	info := dec.decodeIndexInfo()
	// Consume any trailing fields written by newer encoders.
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyIndexInfo, dec.err
	}
	return info, nil
}
// DecodeIndexEntry decodes an index entry, optionally using bytesPool for
// the decoded ID/tag bytes. Digest tracking is enabled for the duration of
// the entry decode so its embedded checksum can be validated.
func (dec *Decoder) DecodeIndexEntry(bytesPool pool.BytesPool) (schema.IndexEntry, error) {
	if dec.err != nil {
		return emptyIndexEntry, dec.err
	}
	dec.readerWithDigest.setDigestReaderEnabled(true)
	_, numToSkip := dec.decodeRootObject(indexEntryVersion, indexEntryType)
	entry := dec.decodeIndexEntry(bytesPool)
	dec.readerWithDigest.setDigestReaderEnabled(false)
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyIndexEntry, dec.err
	}
	return entry, nil
}
// DecodeIndexSummary decodes an index summary along with a token that
// locates the summary's ID bytes within the underlying stream.
func (dec *Decoder) DecodeIndexSummary() (
	schema.IndexSummary, IndexSummaryToken, error) {
	if dec.err != nil {
		return emptyIndexSummary, emptyIndexSummaryToken, dec.err
	}
	_, numToSkip := dec.decodeRootObject(indexSummaryVersion, indexSummaryType)
	summary, token := dec.decodeIndexSummary()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyIndexSummary, emptyIndexSummaryToken, dec.err
	}
	return summary, token, nil
}
// DecodeLogInfo decodes commit log info.
func (dec *Decoder) DecodeLogInfo() (schema.LogInfo, error) {
	if dec.err != nil {
		return emptyLogInfo, dec.err
	}
	_, numToSkip := dec.decodeRootObject(logInfoVersion, logInfoType)
	info := dec.decodeLogInfo()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyLogInfo, dec.err
	}
	return info, nil
}
// DecodeLogEntry decodes a commit log entry.
func (dec *Decoder) DecodeLogEntry() (schema.LogEntry, error) {
	if dec.err != nil {
		return emptyLogEntry, dec.err
	}
	_, numToSkip := dec.decodeRootObject(logEntryVersion, logEntryType)
	entry := dec.decodeLogEntry()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyLogEntry, dec.err
	}
	return entry, nil
}
// DecodeLogEntryRemainingToken contains all the information that DecodeLogEntryRemaining
// requires to continue decoding a log entry after a call to DecodeLogEntryUniqueIndex.
type DecodeLogEntryRemainingToken struct {
	// numFieldsToSkip1 is the skip count for the root object,
	// numFieldsToSkip2 the skip count for the log entry object itself.
	numFieldsToSkip1 int
	numFieldsToSkip2 int
}
// DecodeLogEntryUniqueIndex decodes a log entry only as far as needed to
// return the series unique index. Call DecodeLogEntryRemaining afterwards
// with the returned token to decode the remaining fields.
func (dec *Decoder) DecodeLogEntryUniqueIndex() (DecodeLogEntryRemainingToken, uint64, error) {
	if dec.err != nil {
		return emptyLogEntryRemainingToken, 0, dec.err
	}
	_, skipRoot := dec.decodeRootObject(logEntryVersion, logEntryType)
	skipEntry, _, ok := dec.checkNumFieldsFor(logEntryType, checkNumFieldsOptions{})
	if !ok {
		return emptyLogEntryRemainingToken, 0, errorUnableToDetermineNumFieldsToSkip
	}
	idx := dec.decodeVarUint()
	return DecodeLogEntryRemainingToken{
		numFieldsToSkip1: skipRoot,
		numFieldsToSkip2: skipEntry,
	}, idx, nil
}
// DecodeLogEntryRemaining can only be called after DecodeLogEntryUniqueIndex,
// and it returns a complete schema.LogEntry. The field decode order below
// must match the encoded layout exactly.
func (dec *Decoder) DecodeLogEntryRemaining(token DecodeLogEntryRemainingToken, index uint64) (schema.LogEntry, error) {
	if dec.err != nil {
		return emptyLogEntry, dec.err
	}
	entry := schema.LogEntry{Index: index}
	entry.Create = dec.decodeVarint()
	entry.Metadata, _, _ = dec.decodeBytes()
	entry.Timestamp = dec.decodeVarint()
	entry.Value = dec.decodeFloat64()
	entry.Unit = uint32(dec.decodeVarUint())
	entry.Annotation, _, _ = dec.decodeBytes()
	dec.skip(token.numFieldsToSkip1)
	if dec.err != nil {
		return emptyLogEntry, dec.err
	}
	dec.skip(token.numFieldsToSkip2)
	if dec.err != nil {
		return emptyLogEntry, dec.err
	}
	return entry, nil
}
// DecodeLogMetadata decodes commit log metadata.
func (dec *Decoder) DecodeLogMetadata() (schema.LogMetadata, error) {
	if dec.err != nil {
		return emptyLogMetadata, dec.err
	}
	_, numToSkip := dec.decodeRootObject(logMetadataVersion, logMetadataType)
	metadata := dec.decodeLogMetadata()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyLogMetadata, dec.err
	}
	return metadata, nil
}
// decodeIndexInfo decodes a schema.IndexInfo body, handling every on-disk
// format from V1 (6 fields) through V5 (11 fields). Newer fields are only
// decoded when the actual field count shows they are present; any error is
// recorded in dec.err and the zero value returned.
func (dec *Decoder) decodeIndexInfo() schema.IndexInfo {
	var opts checkNumFieldsOptions

	// Overrides only used to test forwards compatibility.
	switch dec.legacy.decodeLegacyIndexInfoVersion {
	case legacyEncodingIndexVersionV1:
		// V1 had 6 fields.
		opts.override = true
		opts.numExpectedMinFields = 6
		opts.numExpectedCurrFields = 6
	case legacyEncodingIndexVersionV2:
		// V2 had 8 fields.
		opts.override = true
		opts.numExpectedMinFields = 6
		opts.numExpectedCurrFields = 8
	case legacyEncodingIndexVersionV3:
		// V3 had 9 fields.
		opts.override = true
		opts.numExpectedMinFields = 6
		opts.numExpectedCurrFields = 9
	case legacyEncodingIndexVersionV4:
		// V4 had 10 fields.
		opts.override = true
		opts.numExpectedMinFields = 6
		opts.numExpectedCurrFields = 10
	}

	numFieldsToSkip, actual, ok := dec.checkNumFieldsFor(indexInfoType, opts)
	if !ok {
		return emptyIndexInfo
	}

	// Fields present since V1.
	var indexInfo schema.IndexInfo
	indexInfo.BlockStart = dec.decodeVarint()
	indexInfo.BlockSize = dec.decodeVarint()
	indexInfo.Entries = dec.decodeVarint()
	indexInfo.MajorVersion = dec.decodeVarint()
	indexInfo.Summaries = dec.decodeIndexSummariesInfo()
	indexInfo.BloomFilter = dec.decodeIndexBloomFilterInfo()

	// At this point if its a V1 file we've decoded all the available fields.
	if dec.legacy.decodeLegacyIndexInfoVersion == legacyEncodingIndexVersionV1 || actual < 8 {
		dec.skip(numFieldsToSkip)
		return indexInfo
	}

	// Decode fields added in V2.
	indexInfo.SnapshotTime = dec.decodeVarint()
	indexInfo.FileType = persist.FileSetType(dec.decodeVarint())

	// At this point if its a V2 file we've decoded all the available fields.
	if dec.legacy.decodeLegacyIndexInfoVersion == legacyEncodingIndexVersionV2 || actual < 9 {
		dec.skip(numFieldsToSkip)
		return indexInfo
	}

	// Decode fields added in V3.
	indexInfo.SnapshotID, _, _ = dec.decodeBytes()

	// At this point if its a V3 file we've decoded all the available fields.
	if dec.legacy.decodeLegacyIndexInfoVersion == legacyEncodingIndexVersionV3 || actual < 10 {
		dec.skip(numFieldsToSkip)
		return indexInfo
	}

	// Decode fields added in V4.
	indexInfo.VolumeIndex = int(dec.decodeVarint())

	// At this point if its a V4 file we've decoded all the available fields.
	if dec.legacy.decodeLegacyIndexInfoVersion == legacyEncodingIndexVersionV4 || actual < 11 {
		dec.skip(numFieldsToSkip)
		return indexInfo
	}

	// Decode fields added in V5.
	indexInfo.MinorVersion = dec.decodeVarint()

	dec.skip(numFieldsToSkip)
	return indexInfo
}
// decodeIndexSummariesInfo decodes the nested index summaries info object.
func (dec *Decoder) decodeIndexSummariesInfo() schema.IndexSummariesInfo {
	numToSkip, _, ok := dec.checkNumFieldsFor(indexSummariesInfoType, checkNumFieldsOptions{})
	if !ok {
		return emptyIndexSummariesInfo
	}
	info := schema.IndexSummariesInfo{
		Summaries: dec.decodeVarint(),
	}
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyIndexSummariesInfo
	}
	return info
}
// decodeIndexBloomFilterInfo decodes the nested bloom filter info object.
func (dec *Decoder) decodeIndexBloomFilterInfo() schema.IndexBloomFilterInfo {
	numToSkip, _, ok := dec.checkNumFieldsFor(indexBloomFilterInfoType, checkNumFieldsOptions{})
	if !ok {
		return emptyIndexBloomFilterInfo
	}
	var info schema.IndexBloomFilterInfo
	// Field order must match the encoded layout: m then k.
	info.NumElementsM = dec.decodeVarint()
	info.NumHashesK = dec.decodeVarint()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyIndexBloomFilterInfo
	}
	return info
}
// decodeIndexEntry decodes a schema.IndexEntry body, handling formats V1
// (5 fields), V2 (6 fields) and V3+ (checksum-terminated). When bytesPool
// is non-nil, ID and EncodedTags bytes are drawn from the pool. NOTE: the
// statement order at the end is load-bearing — the digest must be read
// BEFORE the expected-checksum field is decoded.
func (dec *Decoder) decodeIndexEntry(bytesPool pool.BytesPool) schema.IndexEntry {
	var opts checkNumFieldsOptions
	switch dec.legacy.decodeLegacyIndexEntryVersion {
	case legacyEncodingIndexEntryVersionV1:
		// V1 had 5 fields.
		opts.override = true
		opts.numExpectedMinFields = 5
		opts.numExpectedCurrFields = 5
	case legacyEncodingIndexEntryVersionV2:
		// V2 had 6 fields.
		opts.override = true
		opts.numExpectedMinFields = 5
		opts.numExpectedCurrFields = 6
	case legacyEncodingIndexEntryVersionCurrent:
		// V3 is current version, no overrides needed
		break
	default:
		dec.err = fmt.Errorf("invalid legacyEncodingIndexEntryVersion provided: %v", dec.legacy.decodeLegacyIndexEntryVersion)
		return emptyIndexEntry
	}

	numFieldsToSkip, actual, ok := dec.checkNumFieldsFor(indexEntryType, opts)
	if !ok {
		return emptyIndexEntry
	}

	// Fields present since V1.
	var indexEntry schema.IndexEntry
	indexEntry.Index = dec.decodeVarint()

	if bytesPool == nil {
		indexEntry.ID, _, _ = dec.decodeBytes()
	} else {
		indexEntry.ID = dec.decodeBytesWithPool(bytesPool)
	}

	indexEntry.Size = dec.decodeVarint()
	indexEntry.Offset = dec.decodeVarint()
	indexEntry.DataChecksum = dec.decodeVarint()

	// At this point, if its a V1 file, we've decoded all the available fields.
	if dec.legacy.decodeLegacyIndexEntryVersion == legacyEncodingIndexEntryVersionV1 || actual < 6 {
		dec.skip(numFieldsToSkip)
		return indexEntry
	}

	// Decode fields added in V2
	if bytesPool == nil {
		indexEntry.EncodedTags, _, _ = dec.decodeBytes()
	} else {
		indexEntry.EncodedTags = dec.decodeBytesWithPool(bytesPool)
	}

	// At this point, if its a V2 file, we've decoded all the available fields.
	if dec.legacy.decodeLegacyIndexEntryVersion == legacyEncodingIndexEntryVersionV2 || actual < 7 {
		dec.skip(numFieldsToSkip)
		return indexEntry
	}

	// NB(nate): Any new fields should be parsed here.

	// Intentionally skip any extra fields here as we've stipulated that from V3 onward, IndexEntryChecksum will be the
	// final field on index entries
	dec.skip(numFieldsToSkip)

	// Retrieve actual checksum value here. Attempting to retrieve after decoding the upcoming expected checksum field
	// would include value in actual checksum calculation which would cause a mismatch
	actualChecksum := dec.readerWithDigest.digest().Sum32()

	// Decode checksum field originally added in V3
	expectedChecksum := uint32(dec.decodeVarint())

	if expectedChecksum != actualChecksum {
		dec.err = errorIndexEntryChecksumMismatch
	}

	return indexEntry
}
// decodeIndexSummary decodes a schema.IndexSummary body plus a token
// recording where in the byte stream the ID bytes live, so callers can
// later seek straight to the ID without re-decoding. The offset capture
// depends on decodeBytes being called in exactly this position.
func (dec *Decoder) decodeIndexSummary() (schema.IndexSummary, IndexSummaryToken) {
	numFieldsToSkip, _, ok := dec.checkNumFieldsFor(indexSummaryType, checkNumFieldsOptions{})
	if !ok {
		return emptyIndexSummary, emptyIndexSummaryToken
	}
	var (
		indexSummary       schema.IndexSummary
		idBytesStartOffset int
		idBytesLength      int
	)
	indexSummary.Index = dec.decodeVarint()
	// Keep track of the offset in the byte stream before we decode the bytes so
	// that we know exactly where to jump to if we want to just grab the ID itself
	indexSummary.ID, idBytesStartOffset, idBytesLength = dec.decodeBytes()
	indexSummary.IndexEntryOffset = dec.decodeVarint()
	dec.skip(numFieldsToSkip)
	if dec.err != nil {
		return emptyIndexSummary, emptyIndexSummaryToken
	}
	// Downscaling to uint32 is fine because summary files and ID length should
	// be well below the max value of a uint32
	indexSummaryToken := NewIndexSummaryToken(
		uint32(idBytesStartOffset), uint32(idBytesLength),
	)
	return indexSummary, indexSummaryToken
}
// decodeLogInfo decodes a schema.LogInfo body.
func (dec *Decoder) decodeLogInfo() schema.LogInfo {
	numToSkip, _, ok := dec.checkNumFieldsFor(logInfoType, checkNumFieldsOptions{})
	if !ok {
		return emptyLogInfo
	}
	var info schema.LogInfo
	// Deprecated fields must still be consumed for backwards compatibility,
	// even though their values are ignored.
	info.DeprecatedDoNotUseStart = dec.decodeVarint()
	info.DeprecatedDoNotUseDuration = dec.decodeVarint()
	info.Index = dec.decodeVarint()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyLogInfo
	}
	return info
}
// decodeLogEntry decodes a schema.LogEntry body. Field order must match
// the encoded layout exactly.
func (dec *Decoder) decodeLogEntry() schema.LogEntry {
	numToSkip, _, ok := dec.checkNumFieldsFor(logEntryType, checkNumFieldsOptions{})
	if !ok {
		return emptyLogEntry
	}
	var entry schema.LogEntry
	entry.Index = dec.decodeVarUint()
	entry.Create = dec.decodeVarint()
	entry.Metadata, _, _ = dec.decodeBytes()
	entry.Timestamp = dec.decodeVarint()
	entry.Value = dec.decodeFloat64()
	entry.Unit = uint32(dec.decodeVarUint())
	entry.Annotation, _, _ = dec.decodeBytes()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyLogEntry
	}
	return entry
}
// decodeLogMetadata decodes a schema.LogMetadata body. Field order must
// match the encoded layout exactly.
func (dec *Decoder) decodeLogMetadata() schema.LogMetadata {
	numToSkip, _, ok := dec.checkNumFieldsFor(logMetadataType, checkNumFieldsOptions{})
	if !ok {
		return emptyLogMetadata
	}
	var metadata schema.LogMetadata
	metadata.ID, _, _ = dec.decodeBytes()
	metadata.Namespace, _, _ = dec.decodeBytes()
	metadata.Shard = uint32(dec.decodeVarUint())
	metadata.EncodedTags, _, _ = dec.decodeBytes()
	dec.skip(numToSkip)
	if dec.err != nil {
		return emptyLogMetadata
	}
	return metadata
}
// decodeRootObject validates the version and object type of the next root
// object in the stream, returning the decoded version and how many trailing
// root fields should be skipped after the payload is decoded.
func (dec *Decoder) decodeRootObject(expectedVersion int, expectedType objectType) (version int, numFieldsToSkip int) {
	version = dec.checkVersion(expectedVersion)
	if dec.err != nil {
		return 0, 0
	}
	numFieldsToSkip, _, ok := dec.checkNumFieldsFor(rootObjectType, checkNumFieldsOptions{})
	if !ok {
		return 0, 0
	}
	switch actualType := dec.decodeObjectType(); {
	case dec.err != nil:
		return 0, 0
	case actualType != expectedType:
		dec.err = fmt.Errorf("object type mismatch: expected %v actual %v", expectedType, actualType)
		return 0, 0
	}
	return version, numFieldsToSkip
}
// checkVersion decodes the version field and fails if it is newer than the
// version this decoder understands. Older versions are accepted.
func (dec *Decoder) checkVersion(expected int) int {
	version := int(dec.decodeVarint())
	switch {
	case dec.err != nil:
		return 0
	case version > expected:
		dec.err = fmt.Errorf("version mismatch: expected %v actual %v", expected, version)
		return 0
	default:
		return version
	}
}
// checkNumFieldsOptions allows callers of checkNumFieldsFor to override the
// schema-derived expected field counts (used to decode legacy formats and
// to test forwards compatibility).
type checkNumFieldsOptions struct {
	// override enables the two explicit counts below in place of
	// numFieldsForType's values.
	override             bool
	numExpectedMinFields int
	numExpectedCurrFields int
}
// checkNumFieldsFor reads the field count of the next object and validates
// it against the expected counts for objType (or the override in opts).
// It returns how many surplus fields must be skipped, the actual count,
// and whether validation succeeded.
func (dec *Decoder) checkNumFieldsFor(
	objType objectType,
	opts checkNumFieldsOptions,
) (numToSkip int, actual int, ok bool) {
	actual = dec.decodeNumObjectFields()
	if dec.err != nil {
		return 0, 0, false
	}
	expectedMin, expectedCurr := numFieldsForType(objType)
	if opts.override {
		expectedMin = opts.numExpectedMinFields
		expectedCurr = opts.numExpectedCurrFields
	}
	if actual < expectedMin {
		dec.err = fmt.Errorf("number of fields mismatch: expected minimum of %d actual %d", expectedMin, actual)
		return 0, 0, false
	}
	// Fields beyond the current count were written by a newer encoder and
	// must be skipped; never report a negative skip count.
	if extra := actual - expectedCurr; extra > 0 {
		return extra, actual, true
	}
	return 0, actual, true
}
// skip discards numFields msgpack values from the stream. It is a no-op
// when an error is already pending, and records an error for a negative
// count.
func (dec *Decoder) skip(numFields int) {
	if dec.err != nil {
		return
	}
	if numFields < 0 {
		dec.err = fmt.Errorf("number of fields to skip is %d", numFields)
		return
	}
	for ; numFields > 0; numFields-- {
		if err := dec.dec.Skip(); err != nil {
			dec.err = err
			return
		}
	}
}
// decodeNumObjectFields reads an object's field count; objects are encoded
// as msgpack arrays, so this is simply the array length.
func (dec *Decoder) decodeNumObjectFields() int {
	return dec.decodeArrayLen()
}
// decodeObjectType reads the object-type discriminator (encoded as a varint).
func (dec *Decoder) decodeObjectType() objectType {
	return objectType(dec.decodeVarint())
}
// decodeVarint reads a signed 64-bit integer, returning 0 and recording
// the error in dec.err on failure (or if an error is already pending).
func (dec *Decoder) decodeVarint() int64 {
	if dec.err != nil {
		return 0
	}
	var v int64
	v, dec.err = dec.dec.DecodeInt64()
	return v
}
// decodeVarUint reads an unsigned 64-bit integer, returning 0 and recording
// the error in dec.err on failure (or if an error is already pending).
func (dec *Decoder) decodeVarUint() uint64 {
	if dec.err != nil {
		return 0
	}
	var v uint64
	v, dec.err = dec.dec.DecodeUint64()
	return v
}
// decodeFloat64 reads a 64-bit float, returning 0 and recording the error
// in dec.err on failure (or if an error is already pending).
func (dec *Decoder) decodeFloat64() float64 {
	if dec.err != nil {
		return 0.0
	}
	var v float64
	v, dec.err = dec.dec.DecodeFloat64()
	return v
}
// decodeBytes reads a byte slice and returns it along with its start offset
// and length within the underlying stream (both -1 when unavailable).
// When allocDecodedBytes is set the bytes are freshly allocated by the
// msgpack library; otherwise the returned slice ALIASES the backing byte
// stream, which requires byteReader to be set (i.e. Reset was given a
// ByteStream). Should only be called if dec.byteReader != nil in the
// alias-returning mode.
func (dec *Decoder) decodeBytes() ([]byte, int, int) {
	if dec.err != nil {
		return nil, -1, -1
	}
	// If we need to allocate new space for decoded byte slice, we delegate it to msgpack
	// API which allocates a new slice under the hood, otherwise we simply locate the byte
	// slice as part of the encoded byte stream and return it
	var value []byte
	if dec.allocDecodedBytes {
		value, dec.err = dec.dec.DecodeBytes()
		return value, -1, -1
	}
	if dec.byteReader == nil {
		// If we're not allowing the msgpack library to allocate the bytes and we haven't been
		// provided a byte decoder stream, then we've reached an invalid state as its not
		// possible for us to decode the bytes in an alloc-less way.
		dec.err = errorCalledDecodeBytesWithoutByteStreamDecoder
		return nil, -1, -1
	}
	var (
		bytesLen     = dec.decodeBytesLen()
		backingBytes = dec.byteReader.Bytes()
		numBytes     = len(backingBytes)
		// Current position = total size minus what remains unread.
		currPos = int(int64(numBytes) - dec.byteReader.Remaining())
	)
	if dec.err != nil {
		return nil, -1, -1
	}
	// NB(xichen): DecodeBytesLen() returns -1 if the byte slice is nil
	if bytesLen == -1 {
		return nil, -1, -1
	}
	targetPos := currPos + bytesLen
	// Bounds-check before slicing the backing array.
	if bytesLen < 0 || currPos < 0 || targetPos > numBytes {
		dec.err = fmt.Errorf("invalid currPos %d, bytesLen %d, numBytes %d", currPos, bytesLen, numBytes)
		return nil, -1, -1
	}
	// Advance the stream past the bytes we are about to alias.
	if err := dec.byteReader.Skip(int64(bytesLen)); err != nil {
		dec.err = err
		return nil, -1, -1
	}
	value = backingBytes[currPos:targetPos]
	// Feed the aliased bytes into the digest since they bypassed the
	// digest-wrapped reader.
	if err := dec.readerWithDigest.capture(value); err != nil {
		dec.err = err
		return nil, -1, -1
	}
	return value, currPos, bytesLen
}
// decodeBytesWithPool reads a byte slice into a buffer obtained from
// bytesPool. The buffer is returned to the pool on any failure; a nil
// (or negative-length) encoded value yields nil.
func (dec *Decoder) decodeBytesWithPool(bytesPool pool.BytesPool) []byte {
	if dec.err != nil {
		return nil
	}
	length := dec.decodeBytesLen()
	if dec.err != nil || length < 0 {
		return nil
	}
	buf := bytesPool.Get(length)[:length]
	n, err := io.ReadFull(dec.readerWithDigest, buf)
	if err != nil {
		dec.err = err
		bytesPool.Put(buf)
		return nil
	}
	if n != length {
		// This check is redundant because io.ReadFull will return an error if
		// its not able to read the specified number of bytes, but we keep it
		// in for posterity.
		dec.err = fmt.Errorf(
			"tried to decode checked bytes of length: %d, but read: %d",
			length, n)
		bytesPool.Put(buf)
		return nil
	}
	return buf
}
// decodeArrayLen reads a msgpack array header, returning 0 and recording
// the error in dec.err on failure (or if an error is already pending).
func (dec *Decoder) decodeArrayLen() int {
	if dec.err != nil {
		return 0
	}
	var n int
	n, dec.err = dec.dec.DecodeArrayLen()
	return n
}
// decodeBytesLen reads a msgpack bytes header (-1 for nil), returning 0 and
// recording the error in dec.err on failure (or if an error is already
// pending).
func (dec *Decoder) decodeBytesLen() int {
	if dec.err != nil {
		return 0
	}
	var n int
	n, dec.err = dec.dec.DecodeBytesLen()
	return n
}