@@ -23,13 +23,15 @@ import (
 	"fmt"
 	"math/rand/v2"
 	"reflect"
+	"slices"
 	"strconv"
 	"sync"
 	"sync/atomic"
 	"testing"
 	"time"
 
 	"github.com/cockroachdb/pebble/internal/base"
+	"github.com/cockroachdb/pebble/internal/testkeys"
 	"github.com/cockroachdb/pebble/internal/testutils"
 	"github.com/stretchr/testify/require"
 )
@@ -674,6 +676,108 @@ func randomKey(rng *rand.Rand, b []byte) base.InternalKey {
 	return base.InternalKey{UserKey: b}
 }
 
+// randomTestkeysSkiplist creates an arena skiplist with an arena of the given
+// size. It fills the arena with sorted testkeys keys and random values. It
+// returns the skiplist with n keys and a slice of 2n+1 sorted keys, alternating
+// between keys not in the skiplist and keys in the skiplist. The even-indexed
+// keys can be used as seek keys to test searches that do not find exact matches.
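+// For example, if the sorted keys are [a, b, c, d, e], only b and d are
+// inserted; seeking a, c, or e exercises the no-exact-match paths.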
+func randomTestkeysSkiplist(rng *rand.Rand, size int) (*Skiplist, [][]byte) {
+	ks := testkeys.Alpha(5)
+	keys := make([][]byte, 2*size+1)
+	for i := range keys {
+		keys[i] = testkeys.Key(ks, int64(i))
+	}
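+	// Sort with the same comparer that the skiplist below is constructed
+	// with, so the expected ordering matches the skiplist's iteration order.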
+	slices.SortFunc(keys, testkeys.Comparer.Compare)
+
+	l := NewSkiplist(newArena(uint32(size)), testkeys.Comparer.Compare)
+	var n int
+	for n = 1; n < len(keys); n += 2 {
+		value := testutils.RandBytes(rng, rng.IntN(90)+10)
+		k := base.MakeInternalKey(keys[n], base.SeqNum(n), base.InternalKeyKindSet)
+		err := l.Add(k, value)
+		if err == ErrArenaFull {
+			break
+		} else if err != nil {
+			panic(err)
+		}
+	}
+	return l, keys[:n]
+}
+
+func TestSkiplistFindSplice(t *testing.T) {
+	seed := int64(time.Now().UnixNano())
+	t.Logf("seed: %d", seed)
+	rng := rand.New(rand.NewPCG(0, uint64(seed)))
+	skl, keys := randomTestkeysSkiplist(rng, 4<<20)
+
+	// Build a slice of all the nodes.
+	var nodes []*node
+	for n := skl.head; n != skl.tail; n = skl.getNext(n, 0) {
+		nodes = append(nodes, n)
+	}
+	nodes = append(nodes, skl.tail)
+
+	// Check that the nodes match the odd-indexed keys.
+	// NB: We skip the head and tail nodes.
+	for i, n := range nodes[1 : len(nodes)-1] {
+		require.Equal(t, keys[2*i+1], n.getKeyBytes(skl.arena))
+	}
+
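+	// Reuse a single Inserter across iterations so that findSplice usually
+	// starts from the splice cached by the previous call; the random reset at
+	// the bottom of the loop also exercises the uncached path.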
+	var ins Inserter
+	for i, key := range keys {
+		// Only the odd-indexed keys are in the skiplist, so the index of the
+		// node we expect to find is i / 2.
+		ni := i / 2
+		skl.findSplice(base.MakeSearchKey(key), &ins)
+
+		// Check that the prev and next nodes match the expected adjacent nodes.
+		if nodes[ni] != ins.spl[0].prev {
+			t.Errorf("prev: %s != %s", nodes[ni].getKeyBytes(skl.arena),
+				ins.spl[0].prev.getKeyBytes(skl.arena))
+		}
+		if nodes[ni+1] != ins.spl[0].next {
+			t.Errorf("next: %s != %s", nodes[ni+1].getKeyBytes(skl.arena),
+				ins.spl[0].next.getKeyBytes(skl.arena))
+		}
+
+		// At every height, ensure that the splice brackets the key and is
+		// tight.
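+		// That is, spl.prev and spl.next must be adjacent at level j, with
+		// prev < key <= next (head and tail sentinels excepted).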
+		for j := 0; j < int(ins.height); j++ {
+			spl := &ins.spl[j]
+			if prevNext := skl.getNext(spl.prev, j); prevNext != spl.next {
+				t.Errorf("level %d; spl = (%q,%q); prevNext = %q", j,
+					spl.prev.getKeyBytes(skl.arena), spl.next.getKeyBytes(skl.arena), prevNext.getKeyBytes(skl.arena))
+			}
+			if nextPrev := skl.getPrev(spl.next, j); nextPrev != spl.prev {
+				t.Errorf("level %d; spl = (%q,%q); nextPrev = %q", j,
+					spl.prev.getKeyBytes(skl.arena), spl.next.getKeyBytes(skl.arena), nextPrev.getKeyBytes(skl.arena))
+			}
+			if spl.prev != skl.head && testkeys.Comparer.Compare(spl.prev.getKeyBytes(skl.arena), key) >= 0 {
+				t.Errorf("level %d; spl = (%q,%q); prev is not before key %q", j,
+					spl.prev.getKeyBytes(skl.arena), spl.next.getKeyBytes(skl.arena), key)
+			}
+			if spl.next != skl.tail && testkeys.Comparer.Compare(spl.next.getKeyBytes(skl.arena), key) < 0 {
+				t.Errorf("level %d; spl = (%q,%q); next is not after key %q", j,
+					spl.prev.getKeyBytes(skl.arena), spl.next.getKeyBytes(skl.arena), key)
+			}
+		}
+
+		// Sometimes reset the Inserter to test a non-cached splice.
+		if rng.IntN(2) == 1 {
+			ins = Inserter{}
+		}
+	}
+}
+
 // Standard test. Some fraction is read. Some fraction is write. Writes have
 // to go through mutex lock.
 func BenchmarkReadWrite(b *testing.B) {