/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package index

import (
"bytes"
"errors"
"fmt"
"io"
"log"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"camlistore.org/pkg/blob"
"camlistore.org/pkg/blobserver"
"camlistore.org/pkg/env"
"camlistore.org/pkg/schema"
"camlistore.org/pkg/sorted"
"camlistore.org/pkg/types/camtypes"
"go4.org/jsonconfig"
"go4.org/strutil"
"go4.org/types"
"golang.org/x/net/context"
)
func init() {
blobserver.RegisterStorageConstructor("index", newFromConfig)
}
type Index struct {
*blobserver.NoImplStorage
s sorted.KeyValue
KeyFetcher blob.Fetcher // for verifying claims
// TODO(mpl): do not init and use deletes when we have a corpus. Since the corpus now has its own deletes, these are redundant.
// deletes is a cache to keep track of the deletion status (deleted vs undeleted)
// of the blobs in the index. It makes for faster reads than the otherwise
// recursive calls on the index.
deletes *deletionCache
corpus *Corpus // or nil, if not being kept in memory
mu sync.RWMutex // guards following
//mu syncdebug.RWMutexTracker // (when debugging)
// needs maps from a blob to the missing blobs it needs to
// finish indexing.
needs map[blob.Ref][]blob.Ref
// neededBy is the inverse of needs. The keys are missing blobs
// and the value(s) are blobs waiting to be reindexed.
neededBy map[blob.Ref][]blob.Ref
readyReindex map[blob.Ref]bool // set of things ready to be re-indexed
// reindexWg is used to make sure that we wait for all asynchronous, out
// of order, indexing to be finished, at the end of reindexing.
reindexWg sync.WaitGroup
// oooDisabled reports whether out of order indexing is disabled. It
// should only be the case in some very specific tests.
oooDisabled bool
// blobSource is used for fetching blobs when indexing files and other
// blob types that reference other objects.
// The only write access to blobSource should be its initialization (transition
// from nil to non-nil), once, and protected by mu.
blobSource blobserver.FetcherEnumerator
hasWiped bool // whether Wipe has already been called on s, so Reindex doesn't needlessly wipe it again
}
func (x *Index) Lock() { x.mu.Lock() }
func (x *Index) Unlock() { x.mu.Unlock() }
func (x *Index) RLock() { x.mu.RLock() }
func (x *Index) RUnlock() { x.mu.RUnlock() }
var (
_ blobserver.Storage = (*Index)(nil)
_ Interface = (*Index)(nil)
)
var aboutToReindex = false
// TODO(mpl): I'm not sure there are any cases where we don't want the index to
// have a blobSource, so maybe we should phase out InitBlobSource and integrate it
// into New or something. But later.
// InitBlobSource sets the index's blob source and starts the background
// out-of-order indexing loop. It panics if the blobSource is already set.
// If the index's key fetcher is nil, it is also set to the blobSource
// argument.
func (x *Index) InitBlobSource(blobSource blobserver.FetcherEnumerator) {
x.Lock()
defer x.Unlock()
if x.blobSource != nil {
panic("blobSource of Index already set")
}
x.blobSource = blobSource
if x.KeyFetcher == nil {
x.KeyFetcher = blobSource
}
}
// New returns a new index using the provided key/value storage implementation.
func New(s sorted.KeyValue) (*Index, error) {
idx := &Index{
s: s,
needs: make(map[blob.Ref][]blob.Ref),
neededBy: make(map[blob.Ref][]blob.Ref),
readyReindex: make(map[blob.Ref]bool),
}
if aboutToReindex {
idx.deletes = newDeletionCache()
return idx, nil
}
schemaVersion := idx.schemaVersion()
switch {
case schemaVersion == 0 && idx.isEmpty():
// New index.
err := idx.s.Set(keySchemaVersion.name, fmt.Sprint(requiredSchemaVersion))
if err != nil {
return nil, fmt.Errorf("Could not write index schema version %q: %v", requiredSchemaVersion, err)
}
case schemaVersion != requiredSchemaVersion:
tip := ""
if env.IsDev() {
// Good signal that we're using the devcam server, so help out
// the user with a more useful tip:
tip = `(For the dev server, run "devcam server --wipe" to wipe both your blobs and index)`
} else {
if is4To5SchemaBump(schemaVersion) {
return idx, errMissingWholeRef
}
tip = "Run 'camlistored --reindex' (it might take awhile, but shows status). Alternative: 'camtool dbinit' (or just delete the file for a file based index), and then 'camtool sync --all'"
}
return nil, fmt.Errorf("index schema version is %d; required one is %d. You need to reindex. %s",
schemaVersion, requiredSchemaVersion, tip)
}
if err := idx.initDeletesCache(); err != nil {
return nil, fmt.Errorf("Could not initialize index's deletes cache: %v", err)
}
if err := idx.initNeededMaps(); err != nil {
return nil, fmt.Errorf("Could not initialize index's missing blob maps: %v", err)
}
return idx, nil
}
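// newMemoryIndexSketch is an illustrative sketch, not part of the original
// file: it shows the typical wiring of New with the in-memory sorted.KeyValue
// implementation (sorted.NewMemoryKeyValue, from camlistore.org/pkg/sorted),
// then installs a blob source so out-of-order indexing can run.
func newMemoryIndexSketch(bs blobserver.FetcherEnumerator) (*Index, error) {
	idx, err := New(sorted.NewMemoryKeyValue())
	if err != nil {
		return nil, err
	}
	// InitBlobSource also sets idx.KeyFetcher if it is still nil.
	idx.InitBlobSource(bs)
	return idx, nil
}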
func is4To5SchemaBump(schemaVersion int) bool {
return schemaVersion == 4 && requiredSchemaVersion == 5
}
var errMissingWholeRef = errors.New("missing wholeRef field in fileInfo rows")
// fixMissingWholeRef appends the wholeRef to the values of all the keyFileInfo
// rows. It should only be called to upgrade a version 4 index schema to version 5.
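// For illustration (hypothetical, abbreviated blobrefs): a version 4 row such as
//   fileinfo|sha1-f00f = 100|foo.txt|text%2Fplain
// becomes, in version 5,
//   fileinfo|sha1-f00f = 100|foo.txt|text%2Fplain|sha1-beef
// where sha1-beef is the file's wholeRef, recovered by inverting the
// wholetofile|<wholeRef>|<fileRef> rows.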
func (x *Index) fixMissingWholeRef(fetcher blob.Fetcher) (err error) {
// The caller already did this check, but double-check again to prevent misuse
// of this function.
if x.schemaVersion() != 4 || requiredSchemaVersion != 5 {
panic("fixMissingWholeRef should only be used when upgrading from v4 to v5 of the index schema")
}
log.Println("index: fixing the missing wholeRef in the fileInfo rows...")
defer func() {
if err != nil {
log.Printf("index: fixing the fileInfo rows failed: %v", err)
return
}
log.Print("index: successfully fixed wholeRef in FileInfo rows.")
}()
// first build a reversed keyWholeToFileRef map, so we can easily get the wholeRef from a fileRef.
fileRefToWholeRef := make(map[blob.Ref]blob.Ref)
it := x.queryPrefix(keyWholeToFileRef)
var keyA [3]string
for it.Next() {
keyPart := strutil.AppendSplitN(keyA[:0], it.Key(), "|", 3)
if len(keyPart) != 3 {
return fmt.Errorf("bogus keyWholeToFileRef key: got %q, wanted \"wholetofile|wholeRef|fileRef\"", it.Key())
}
wholeRef, ok1 := blob.Parse(keyPart[1])
fileRef, ok2 := blob.Parse(keyPart[2])
if !ok1 || !ok2 {
return fmt.Errorf("bogus part in keyWholeToFileRef key: %q", it.Key())
}
fileRefToWholeRef[fileRef] = wholeRef
}
if err := it.Close(); err != nil {
return err
}
var fixedEntries, missedEntries int
t := time.NewTicker(5 * time.Second)
defer t.Stop()
// We record the mutations and set them all after the iteration, because of
// SQLite locking: since both BeginBatch and Find take a lock, we would
// deadlock at queryPrefix if we had already started a batch mutation.
mutations := make(map[string]string)
keyPrefix := keyFileInfo.name + "|"
it = x.queryPrefix(keyFileInfo)
defer it.Close()
var valA [3]string
for it.Next() {
select {
case <-t.C:
log.Printf("Recorded %d missing wholeRef that we'll try to fix, and %d that we can't fix.", fixedEntries, missedEntries)
default:
}
br, ok := blob.ParseBytes(it.KeyBytes()[len(keyPrefix):])
if !ok {
return fmt.Errorf("invalid blobRef %q", it.KeyBytes()[len(keyPrefix):])
}
wholeRef, ok := fileRefToWholeRef[br]
if !ok {
missedEntries++
log.Printf("WARNING: wholeRef for %v not found in index. You should probably rebuild the whole index.", br)
continue
}
valPart := strutil.AppendSplitN(valA[:0], it.Value(), "|", 3)
// The old format we're fixing should be: size|filename|mimetype
if len(valPart) != 3 {
return fmt.Errorf("bogus keyFileInfo value: got %q, wanted \"size|filename|mimetype\"", it.Value())
}
sizeStr, filename, mimetype := valPart[0], valPart[1], urld(valPart[2])
if strings.Contains(mimetype, "|") {
// I think this can only happen for people migrating from a commit at least as recent as
// 8229c1985079681a652cb65551b4e80a10d135aa, when wholeRef was introduced to keyFileInfo
// but there was no migration code yet.
// For the "production" migrations between 0.8 and 0.9, the index should not have any wholeRef
// in the keyFileInfo entries. So if something goes wrong and is somehow linked to that happening,
// I'd like to know about it, hence the logging.
log.Printf("%v: %v already has a wholeRef, not fixing it", it.Key(), it.Value())
continue
}
size, err := strconv.Atoi(sizeStr)
if err != nil {
return fmt.Errorf("bogus size in keyFileInfo value %v: %v", it.Value(), err)
}
mutations[keyFileInfo.Key(br)] = keyFileInfo.Val(size, filename, mimetype, wholeRef)
fixedEntries++
}
if err := it.Close(); err != nil {
return err
}
log.Printf("Starting to commit the missing wholeRef fixes (%d entries) now, this can take a while.", fixedEntries)
bm := x.s.BeginBatch()
for k, v := range mutations {
bm.Set(k, v)
}
bm.Set(keySchemaVersion.name, "5")
if err := x.s.CommitBatch(bm); err != nil {
return err
}
if missedEntries > 0 {
log.Printf("Some missing wholeRef entries were not fixed (%d), you should do a full reindex.", missedEntries)
}
return nil
}
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
blobPrefix := config.RequiredString("blobSource")
kvConfig := config.RequiredObject("storage")
reindex := config.OptionalBool("reindex", false)
if err := config.Validate(); err != nil {
return nil, err
}
kv, err := sorted.NewKeyValue(kvConfig)
if err != nil {
if _, ok := err.(sorted.NeedWipeError); !ok {
return nil, err
}
if !reindex {
return nil, err
}
}
if reindex {
aboutToReindex = true
wiper, ok := kv.(sorted.Wiper)
if !ok {
return nil, fmt.Errorf("index's storage type %T doesn't support sorted.Wiper", kv)
}
if err := wiper.Wipe(); err != nil {
return nil, fmt.Errorf("error wiping index's sorted key/value type %T: %v", kv, err)
}
log.Printf("Index wiped.")
}
sto, err := ld.GetStorage(blobPrefix)
if err != nil {
return nil, err
}
ix, err := New(kv)
// TODO(mpl): next time we need to do another fix, make a new error
// type that lets us apply the needed fix depending on its value or
// something. For now just one value/fix.
if err == errMissingWholeRef {
// TODO: maybe we don't want to do that automatically. Brad says
// we have to think about the case on GCE/CoreOS in particular.
if err := ix.fixMissingWholeRef(sto); err != nil {
ix.Close()
return nil, fmt.Errorf("could not fix missing wholeRef entries: %v", err)
}
ix, err = New(kv)
}
if reindex {
ix.hasWiped = true
}
if err != nil {
return nil, err
}
ix.InitBlobSource(sto)
return ix, err
}
func (x *Index) String() string {
return fmt.Sprintf("Camlistore index, using key/value implementation %T", x.s)
}
func (x *Index) isEmpty() bool {
iter := x.s.Find("", "")
hasRows := iter.Next()
if err := iter.Close(); err != nil {
panic(err)
}
return !hasRows
}
// reindexMaxProcs is the number of concurrent goroutines that will be used for reindexing.
var reindexMaxProcs = struct {
sync.RWMutex
v int
}{v: 4}
// SetReindexMaxProcs sets the maximum number of concurrent goroutines that are
// used during reindexing.
func SetReindexMaxProcs(n int) {
reindexMaxProcs.Lock()
defer reindexMaxProcs.Unlock()
reindexMaxProcs.v = n
}
// ReindexMaxProcs returns the maximum number of concurrent goroutines that are
// used during reindexing.
func ReindexMaxProcs() int {
reindexMaxProcs.RLock()
defer reindexMaxProcs.RUnlock()
return reindexMaxProcs.v
}
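// reindexSketch is an illustrative sketch, not part of the original file: it
// shows the intended call order around Reindex. SetReindexMaxProcs bounds the
// worker goroutines that Reindex starts; the index must already have a blob
// source (see InitBlobSource), or Reindex returns an error.
func reindexSketch(idx *Index) error {
	SetReindexMaxProcs(8) // hypothetical value; the default is 4
	return idx.Reindex()
}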
func (x *Index) Reindex() error {
x.Lock()
if x.blobSource == nil {
x.Unlock()
return errors.New("index: can't re-index: no blobSource")
}
x.Unlock()
reindexMaxProcs.RLock()
defer reindexMaxProcs.RUnlock()
if !x.hasWiped {
wiper, ok := x.s.(sorted.Wiper)
if !ok {
return fmt.Errorf("index's storage type %T doesn't support sorted.Wiper", x.s)
}
log.Printf("Wiping index storage type %T ...", x.s)
if err := wiper.Wipe(); err != nil {
return fmt.Errorf("error wiping index's sorted key/value type %T: %v", x.s, err)
}
log.Printf("Index wiped.")
}
log.Printf("Rebuilding index...")
reindexStart, _ := blob.Parse(os.Getenv("CAMLI_REINDEX_START"))
err := x.s.Set(keySchemaVersion.name, fmt.Sprintf("%d", requiredSchemaVersion))
if err != nil {
return err
}
var nerrmu sync.Mutex
nerr := 0
blobc := make(chan blob.Ref, 32)
enumCtx := context.TODO()
enumErr := make(chan error, 1)
go func() {
defer close(blobc)
donec := enumCtx.Done()
var lastTick time.Time
enumErr <- blobserver.EnumerateAll(enumCtx, x.blobSource, func(sb blob.SizedRef) error {
now := time.Now()
if lastTick.Before(now.Add(-1 * time.Second)) {
log.Printf("Reindexing at %v", sb.Ref)
lastTick = now
}
if reindexStart.Valid() && sb.Ref.Less(reindexStart) {
return nil
}
select {
case <-donec:
return enumCtx.Err()
case blobc <- sb.Ref:
return nil
}
})
}()
var wg sync.WaitGroup
for i := 0; i < reindexMaxProcs.v; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for br := range blobc {
if err := x.indexBlob(br); err != nil {
log.Printf("Error reindexing %v: %v", br, err)
nerrmu.Lock()
nerr++
nerrmu.Unlock()
// TODO: flag (or default?) to stop the EnumerateAll above once
// there's any error with reindexing?
}
}
}()
}
if err := <-enumErr; err != nil {
return err
}
wg.Wait()
x.reindexWg.Wait()
x.RLock()
readyCount := len(x.readyReindex)
needed := len(x.needs)
x.RUnlock()
if readyCount > 0 {
return fmt.Errorf("%d blobs were ready to reindex in out-of-order queue, but not yet ran", readyCount)
}
if needed > 0 {
return fmt.Errorf("%d blobs are still needed as dependencies", needed)
}
log.Printf("Index rebuild complete.")
nerrmu.Lock() // no need to unlock
if nerr != 0 {
return fmt.Errorf("%d blobs failed to re-index", nerr)
}
if err := x.initDeletesCache(); err != nil {
return err
}
return nil
}
func queryPrefixString(s sorted.KeyValue, prefix string) sorted.Iterator {
if prefix == "" {
return s.Find("", "")
}
lastByte := prefix[len(prefix)-1]
if lastByte == 0xff {
panic("unsupported query prefix ending in 0xff")
}
end := prefix[:len(prefix)-1] + string(lastByte+1)
return s.Find(prefix, end)
}
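// Illustration (not in the original): queryPrefixString(s, "meta:sha1-")
// returns s.Find("meta:sha1-", "meta:sha1."), because '.' is the byte after
// '-'; every key that starts with "meta:sha1-" sorts before that end bound,
// so the iterator covers exactly the keys with the given prefix.
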
func (x *Index) queryPrefixString(prefix string) sorted.Iterator {
return queryPrefixString(x.s, prefix)
}
func queryPrefix(s sorted.KeyValue, key *keyType, args ...interface{}) sorted.Iterator {
return queryPrefixString(s, key.Prefix(args...))
}
func (x *Index) queryPrefix(key *keyType, args ...interface{}) sorted.Iterator {
return x.queryPrefixString(key.Prefix(args...))
}
func closeIterator(it sorted.Iterator, perr *error) {
err := it.Close()
if err != nil && *perr == nil {
*perr = err
}
}
// schemaVersion returns the schema version as found in the currently
// used index. If not found, it returns 0.
func (x *Index) schemaVersion() int {
schemaVersionStr, err := x.s.Get(keySchemaVersion.name)
if err != nil {
if err == sorted.ErrNotFound {
return 0
}
panic(fmt.Sprintf("Could not get index schema version: %v", err))
}
schemaVersion, err := strconv.Atoi(schemaVersionStr)
if err != nil {
panic(fmt.Sprintf("Bogus index schema version: %q", schemaVersionStr))
}
return schemaVersion
}
type deletion struct {
deleter blob.Ref
when time.Time
}
type byDeletionDate []deletion
func (d byDeletionDate) Len() int { return len(d) }
func (d byDeletionDate) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byDeletionDate) Less(i, j int) bool { return d[i].when.Before(d[j].when) }
type deletionCache struct {
sync.RWMutex
m map[blob.Ref][]deletion
}
func newDeletionCache() *deletionCache {
return &deletionCache{
m: make(map[blob.Ref][]deletion),
}
}
// initDeletesCache creates and populates the deletion status cache used by the index
// for faster calls to IsDeleted and DeletedAt. It is called by New.
func (x *Index) initDeletesCache() (err error) {
x.deletes = newDeletionCache()
it := x.queryPrefix(keyDeleted)
defer closeIterator(it, &err)
for it.Next() {
cl, ok := kvDeleted(it.Key())
if !ok {
return fmt.Errorf("Bogus keyDeleted entry key: want |\"deleted\"|<deleted blobref>|<reverse claimdate>|<deleter claim>|, got %q", it.Key())
}
targetDeletions := append(x.deletes.m[cl.Target],
deletion{
deleter: cl.BlobRef,
when: cl.Date,
})
sort.Sort(sort.Reverse(byDeletionDate(targetDeletions)))
x.deletes.m[cl.Target] = targetDeletions
}
return err
}
func kvDeleted(k string) (c camtypes.Claim, ok bool) {
// TODO(bradfitz): garbage
keyPart := strings.Split(k, "|")
if len(keyPart) != 4 {
return
}
if keyPart[0] != "deleted" {
return
}
target, ok := blob.Parse(keyPart[1])
if !ok {
return
}
claimRef, ok := blob.Parse(keyPart[3])
if !ok {
return
}
date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[2]))
if err != nil {
return
}
return camtypes.Claim{
BlobRef: claimRef,
Target: target,
Date: date,
Type: string(schema.DeleteClaim),
}, true
}
// IsDeleted reports whether the provided blobref (of a permanode or
// claim) should be considered deleted.
func (x *Index) IsDeleted(br blob.Ref) bool {
if x.deletes == nil {
// We still allow the slow path, in case someone creates
// their own Index without a deletes cache.
return x.isDeletedNoCache(br)
}
x.deletes.RLock()
defer x.deletes.RUnlock()
return x.isDeleted(br)
}
// The caller must hold the x.deletes read lock.
func (x *Index) isDeleted(br blob.Ref) bool {
deletes, ok := x.deletes.m[br]
if !ok {
return false
}
for _, v := range deletes {
if !x.isDeleted(v.deleter) {
return true
}
}
return false
}
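// Worked example (illustrative refs): if permanode P is deleted by claim D1,
// and D1 is in turn deleted by claim D2, then isDeleted(P) recurses into
// isDeleted(D1), finds that D1 is itself deleted (D2 has no deleters), and so
// reports P as NOT deleted: deleting a delete claim undeletes its target.
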
// Used when the Index has no deletes cache (x.deletes is nil).
func (x *Index) isDeletedNoCache(br blob.Ref) bool {
var err error
it := x.queryPrefix(keyDeleted, br)
for it.Next() {
cl, ok := kvDeleted(it.Key())
if !ok {
panic(fmt.Sprintf("Bogus keyDeleted entry key: want |\"deleted\"|<deleted blobref>|<reverse claimdate>|<deleter claim>|, got %q", it.Key()))
}
if !x.isDeletedNoCache(cl.BlobRef) {
closeIterator(it, &err)
if err != nil {
// TODO: Do better?
panic(fmt.Sprintf("Could not close iterator on keyDeleted: %v", err))
}
return true
}
}
closeIterator(it, &err)
if err != nil {
// TODO: Do better?
panic(fmt.Sprintf("Could not close iterator on keyDeleted: %v", err))
}
return false
}
// GetRecentPermanodes sends results to dest filtered by owner, limit, and
// before. A zero value for before defaults to the current time. The results
// have duplicates suppressed, with only the most recent permanode returned.
// Note that permanodes more recent than before are still fetched from the
// index and then skipped, so the runtime scales linearly with the number of
// permanodes more recent than before.
func (x *Index) GetRecentPermanodes(ctx context.Context, dest chan<- camtypes.RecentPermanode, owner blob.Ref, limit int, before time.Time) (err error) {
defer close(dest)
keyId, err := x.KeyId(ctx, owner)
if err == sorted.ErrNotFound {
log.Printf("No recent permanodes because keyId for owner %v not found", owner)
return nil
}
if err != nil {
log.Printf("Error fetching keyId for owner %v: %v", owner, err)
return err
}
sent := 0
var seenPermanode dupSkipper
if before.IsZero() {
before = time.Now()
}
// TODO(bradfitz): handle before efficiently. don't use queryPrefix.
it := x.queryPrefix(keyRecentPermanode, keyId)
defer closeIterator(it, &err)
for it.Next() {
permaStr := it.Value()
parts := strings.SplitN(it.Key(), "|", 4)
if len(parts) != 4 {
continue
}
mTime, _ := time.Parse(time.RFC3339, unreverseTimeString(parts[2]))
permaRef, ok := blob.Parse(permaStr)
if !ok {
continue
}
if x.IsDeleted(permaRef) {
continue
}
if seenPermanode.Dup(permaStr) {
continue
}
// Skip entries whose mTime is not strictly before the cutoff.
if !mTime.Before(before) {
continue
}
dest <- camtypes.RecentPermanode{
Permanode: permaRef,
Signer: owner, // TODO(bradfitz): kinda. usually. for now.
LastModTime: mTime,
}
sent++
if sent == limit {
break
}
}
return nil
}
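// recentPermanodesSketch is an illustrative sketch, not part of the original
// file: GetRecentPermanodes closes dest when it is done, so the caller runs
// it in a goroutine and drains the channel. The limit of 50 and the zero
// time.Time (meaning "now") are arbitrary example values.
func recentPermanodesSketch(ctx context.Context, x *Index, owner blob.Ref) error {
	dest := make(chan camtypes.RecentPermanode)
	errc := make(chan error, 1)
	go func() {
		errc <- x.GetRecentPermanodes(ctx, dest, owner, 50, time.Time{})
	}()
	for rp := range dest {
		log.Printf("recent: %v (last modified %v)", rp.Permanode, rp.LastModTime)
	}
	return <-errc
}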
func (x *Index) AppendClaims(ctx context.Context, dst []camtypes.Claim, permaNode blob.Ref,
signerFilter blob.Ref,
attrFilter string) ([]camtypes.Claim, error) {
if x.corpus != nil {
return x.corpus.AppendClaims(ctx, dst, permaNode, signerFilter, attrFilter)
}
var (
keyId string
err error
it sorted.Iterator
)
if signerFilter.Valid() {
keyId, err = x.KeyId(ctx, signerFilter)
if err == sorted.ErrNotFound {
return nil, nil
}
if err != nil {
return nil, err
}
it = x.queryPrefix(keyPermanodeClaim, permaNode, keyId)
} else {
it = x.queryPrefix(keyPermanodeClaim, permaNode)
}
defer closeIterator(it, &err)
// In the common case, an attribute filter is just a plain token
// ("camliContent") that escaping leaves unchanged. If so, use it as a
// fast-path substring check to skip rows before we even split them.
var mustHave string
if attrFilter != "" && urle(attrFilter) == attrFilter {
mustHave = attrFilter
}
for it.Next() {
val := it.Value()
if mustHave != "" && !strings.Contains(val, mustHave) {
continue
}
cl, ok := kvClaim(it.Key(), val, blob.Parse)
if !ok {
continue
}
if x.IsDeleted(cl.BlobRef) {
continue
}
if attrFilter != "" && cl.Attr != attrFilter {
continue
}
if signerFilter.Valid() && cl.Signer != signerFilter {
continue
}
dst = append(dst, cl)
}
return dst, nil
}
func kvClaim(k, v string, blobParse func(string) (blob.Ref, bool)) (c camtypes.Claim, ok bool) {
const nKeyPart = 5
const nValPart = 4
var keya [nKeyPart]string
var vala [nValPart]string
keyPart := strutil.AppendSplitN(keya[:0], k, "|", -1)
valPart := strutil.AppendSplitN(vala[:0], v, "|", -1)
if len(keyPart) < nKeyPart || len(valPart) < nValPart {
return
}
signerRef, ok := blobParse(valPart[3])
if !ok {
return
}
permaNode, ok := blobParse(keyPart[1])
if !ok {
return
}
claimRef, ok := blobParse(keyPart[4])
if !ok {
return
}
date, err := time.Parse(time.RFC3339, keyPart[3])
if err != nil {
return
}
return camtypes.Claim{
BlobRef: claimRef,
Signer: signerRef,
Permanode: permaNode,
Date: date,
Type: urld(valPart[0]),
Attr: urld(valPart[1]),
Value: urld(valPart[2]),
}, true
}
func (x *Index) GetBlobMeta(ctx context.Context, br blob.Ref) (camtypes.BlobMeta, error) {
if x.corpus != nil {
return x.corpus.GetBlobMeta(ctx, br)
}
key := "meta:" + br.String()
meta, err := x.s.Get(key)
if err == sorted.ErrNotFound {
err = os.ErrNotExist
}
if err != nil {
return camtypes.BlobMeta{}, err
}
pos := strings.Index(meta, "|")
if pos < 0 {
panic(fmt.Sprintf("Bogus index row for key %q: got value %q", key, meta))
}
size, err := strconv.ParseUint(meta[:pos], 10, 32)
if err != nil {
return camtypes.BlobMeta{}, err
}
mime := meta[pos+1:]
return camtypes.BlobMeta{
Ref: br,
Size: uint32(size),
CamliType: camliTypeFromMIME(mime),
}, nil
}
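// Illustration (hypothetical values): a meta row maps
//   meta:<blobref> => <size>|<mime type>
// e.g. "meta:sha1-f00f" => "4096|application/json; camliType=file", from
// which camliTypeFromMIME recovers the "file" camliType.
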
func (x *Index) KeyId(ctx context.Context, signer blob.Ref) (string, error) {
if x.corpus != nil {
return x.corpus.KeyId(ctx, signer)
}
return x.s.Get("signerkeyid:" + signer.String())
}
func (x *Index) PermanodeOfSignerAttrValue(ctx context.Context, signer blob.Ref, attr, val string) (permaNode blob.Ref, err error) {
keyId, err := x.KeyId(ctx, signer)
if err == sorted.ErrNotFound {
return blob.Ref{}, os.ErrNotExist
}
if err != nil {
return blob.Ref{}, err
}
it := x.queryPrefix(keySignerAttrValue, keyId, attr, val)
defer closeIterator(it, &err)
for it.Next() {
permaRef, ok := blob.Parse(it.Value())
if ok && !x.IsDeleted(permaRef) {
return permaRef, nil
}
}
return blob.Ref{}, os.ErrNotExist
}
// SearchPermanodesWithAttr is just like PermanodeOfSignerAttrValue, except it
// returns multiple results and dup-suppresses them.
// If request.Query is "", it is not used in the prefix search.
func (x *Index) SearchPermanodesWithAttr(ctx context.Context, dest chan<- blob.Ref, request *camtypes.PermanodeByAttrRequest) (err error) {
defer close(dest)
if request.FuzzyMatch {
// TODO(bradfitz): remove this for now? figure out how to handle it generically?
return errors.New("TODO: SearchPermanodesWithAttr: generic indexer doesn't support FuzzyMatch on PermanodeByAttrRequest")
}
if request.Attribute == "" {
return errors.New("index: missing Attribute in SearchPermanodesWithAttr")
}
keyId, err := x.KeyId(ctx, request.Signer)
if err == sorted.ErrNotFound {
return nil
}
if err != nil {
return err
}
seen := make(map[string]bool)
var it sorted.Iterator
if request.Query == "" {
it = x.queryPrefix(keySignerAttrValue, keyId, request.Attribute)
} else {
it = x.queryPrefix(keySignerAttrValue, keyId, request.Attribute, request.Query)
}
defer closeIterator(it, &err)
before := request.At
if before.IsZero() {
before = time.Now()
}
for it.Next() {
cl, ok := kvSignerAttrValue(it.Key(), it.Value())
if !ok {
continue
}
if x.IsDeleted(cl.BlobRef) {
continue
}
if x.IsDeleted(cl.Permanode) {
continue
}
if cl.Date.After(before) {
continue
}
pnstr := cl.Permanode.String()
if seen[pnstr] {
continue
}
seen[pnstr] = true
dest <- cl.Permanode
if len(seen) == request.MaxResults {
break
}
}
return nil
}
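// searchByAttrSketch is an illustrative sketch, not part of the original
// file: as with GetRecentPermanodes, the dest channel is closed by the
// callee, so the caller drains it concurrently. The "tag"/"vacation"
// attribute and value are hypothetical.
func searchByAttrSketch(ctx context.Context, x *Index, signer blob.Ref) error {
	dest := make(chan blob.Ref)
	errc := make(chan error, 1)
	go func() {
		errc <- x.SearchPermanodesWithAttr(ctx, dest, &camtypes.PermanodeByAttrRequest{
			Signer:     signer,
			Attribute:  "tag",
			Query:      "vacation",
			MaxResults: 20,
		})
	}()
	for pn := range dest {
		log.Printf("matched permanode: %v", pn)
	}
	return <-errc
}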
func kvSignerAttrValue(k, v string) (c camtypes.Claim, ok bool) {
// TODO(bradfitz): garbage
keyPart := strings.Split(k, "|")
valPart := strings.Split(v, "|")
if len(keyPart) != 6 || len(valPart) != 1 {
// TODO(mpl): use glog
log.Printf("bogus keySignerAttrValue index entry: %q = %q", k, v)
return
}
if keyPart[0] != "signerattrvalue" {
return
}
date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[4]))
if err != nil {
log.Printf("bogus time in keySignerAttrValue index entry: %q", keyPart[4])
return
}
claimRef, ok := blob.Parse(keyPart[5])
if !ok {
log.Printf("bogus claim in keySignerAttrValue index entry: %q", keyPart[5])
return
}
permaNode, ok := blob.Parse(valPart[0])
if !ok {
log.Printf("bogus permanode in keySignerAttrValue index entry: %q", valPart[0])
return
}
return camtypes.Claim{
BlobRef: claimRef,
Permanode: permaNode,
Date: date,
Attr: urld(keyPart[2]),
Value: urld(keyPart[3]),
}, true
}
func (x *Index) PathsOfSignerTarget(ctx context.Context, signer, target blob.Ref) (paths []*camtypes.Path, err error) {
paths = []*camtypes.Path{}
keyId, err := x.KeyId(ctx, signer)
if err != nil {
if err == sorted.ErrNotFound {
err = nil
}
return
}
mostRecent := make(map[string]*camtypes.Path)
maxClaimDates := make(map[string]time.Time)
it := x.queryPrefix(keyPathBackward, keyId, target)
defer closeIterator(it, &err)
for it.Next() {
p, ok, active := kvPathBackward(it.Key(), it.Value())
if !ok {
continue
}
if x.IsDeleted(p.Claim) {
continue
}
if x.IsDeleted(p.Base) {
continue
}
key := p.Base.String() + "/" + p.Suffix
if p.ClaimDate.After(maxClaimDates[key]) {
maxClaimDates[key] = p.ClaimDate
if active {
mostRecent[key] = &p
} else {
delete(mostRecent, key)
}
}
}
for _, v := range mostRecent {
paths = append(paths, v)
}
return paths, nil
}
func kvPathBackward(k, v string) (p camtypes.Path, ok bool, active bool) {
// TODO(bradfitz): garbage