Skip to content

Commit 8dfb102

Browse files
committed
db: use synctest in TestFlushDelay[Stress]
Use the new synctest package to marginally speed up TestFlushDelay and TestFlushDelayStress through the fast-forwarding of time while waiting for a flush to trigger.
1 parent fc0be3c commit 8dfb102

File tree

1 file changed

+148
-143
lines changed

1 file changed

+148
-143
lines changed

range_del_test.go

Lines changed: 148 additions & 143 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import (
1313
"strings"
1414
"sync"
1515
"testing"
16+
"testing/synctest"
1617
"time"
1718

1819
"github.com/cockroachdb/datadriven"
@@ -101,157 +102,161 @@ func TestRangeDel(t *testing.T) {
101102
}
102103

103104
func TestFlushDelay(t *testing.T) {
104-
opts := &Options{
105-
FS: vfs.NewMem(),
106-
Comparer: testkeys.Comparer,
107-
FlushDelayDeleteRange: 10 * time.Millisecond,
108-
FlushDelayRangeKey: 10 * time.Millisecond,
109-
FormatMajorVersion: internalFormatNewest,
110-
Logger: testutils.Logger{T: t},
111-
}
112-
d, err := Open("", opts)
113-
require.NoError(t, err)
105+
synctest.Test(t, func(t *testing.T) {
106+
opts := &Options{
107+
FS: vfs.NewMem(),
108+
Comparer: testkeys.Comparer,
109+
FlushDelayDeleteRange: 10 * time.Millisecond,
110+
FlushDelayRangeKey: 10 * time.Millisecond,
111+
FormatMajorVersion: internalFormatNewest,
112+
Logger: testutils.Logger{T: t},
113+
}
114+
d, err := Open("", opts)
115+
require.NoError(t, err)
114116

115-
// Ensure that all the various means of writing a rangedel or range key
116-
// trigger their respective flush delays.
117-
cases := []func(){
118-
func() {
119-
require.NoError(t, d.DeleteRange([]byte("a"), []byte("z"), nil))
120-
},
121-
func() {
122-
b := d.NewBatch()
123-
require.NoError(t, b.DeleteRange([]byte("a"), []byte("z"), nil))
124-
require.NoError(t, b.Commit(nil))
125-
},
126-
func() {
127-
b := d.NewBatch()
128-
op := b.DeleteRangeDeferred(1, 1)
129-
op.Key[0] = 'a'
130-
op.Value[0] = 'z'
131-
op.Finish()
132-
require.NoError(t, b.Commit(nil))
133-
},
134-
func() {
135-
b := d.NewBatch()
136-
b2 := d.NewBatch()
137-
require.NoError(t, b.DeleteRange([]byte("a"), []byte("z"), nil))
138-
require.NoError(t, b2.SetRepr(b.Repr()))
139-
require.NoError(t, b2.Commit(nil))
140-
require.NoError(t, b.Close())
141-
},
142-
func() {
143-
b := d.NewBatch()
144-
b2 := d.NewBatch()
145-
require.NoError(t, b.DeleteRange([]byte("a"), []byte("z"), nil))
146-
require.NoError(t, b2.Apply(b, nil))
147-
require.NoError(t, b2.Commit(nil))
148-
require.NoError(t, b.Close())
149-
},
150-
func() {
151-
require.NoError(t, d.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
152-
},
153-
func() {
154-
require.NoError(t, d.RangeKeyUnset([]byte("a"), []byte("z"), nil, nil))
155-
},
156-
func() {
157-
require.NoError(t, d.RangeKeyDelete([]byte("a"), []byte("z"), nil))
158-
},
159-
func() {
160-
b := d.NewBatch()
161-
require.NoError(t, b.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
162-
require.NoError(t, b.Commit(nil))
163-
},
164-
func() {
165-
b := d.NewBatch()
166-
require.NoError(t, b.RangeKeyUnset([]byte("a"), []byte("z"), nil, nil))
167-
require.NoError(t, b.Commit(nil))
168-
},
169-
func() {
170-
b := d.NewBatch()
171-
require.NoError(t, b.RangeKeyDelete([]byte("a"), []byte("z"), nil))
172-
require.NoError(t, b.Commit(nil))
173-
},
174-
func() {
175-
b := d.NewBatch()
176-
b2 := d.NewBatch()
177-
require.NoError(t, b.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
178-
require.NoError(t, b2.SetRepr(b.Repr()))
179-
require.NoError(t, b2.Commit(nil))
180-
require.NoError(t, b.Close())
181-
},
182-
func() {
183-
b := d.NewBatch()
184-
b2 := d.NewBatch()
185-
require.NoError(t, b.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
186-
require.NoError(t, b2.Apply(b, nil))
187-
require.NoError(t, b2.Commit(nil))
188-
require.NoError(t, b.Close())
189-
},
190-
}
117+
// Ensure that all the various means of writing a rangedel or range key
118+
// trigger their respective flush delays.
119+
cases := []func(){
120+
func() {
121+
require.NoError(t, d.DeleteRange([]byte("a"), []byte("z"), nil))
122+
},
123+
func() {
124+
b := d.NewBatch()
125+
require.NoError(t, b.DeleteRange([]byte("a"), []byte("z"), nil))
126+
require.NoError(t, b.Commit(nil))
127+
},
128+
func() {
129+
b := d.NewBatch()
130+
op := b.DeleteRangeDeferred(1, 1)
131+
op.Key[0] = 'a'
132+
op.Value[0] = 'z'
133+
op.Finish()
134+
require.NoError(t, b.Commit(nil))
135+
},
136+
func() {
137+
b := d.NewBatch()
138+
b2 := d.NewBatch()
139+
require.NoError(t, b.DeleteRange([]byte("a"), []byte("z"), nil))
140+
require.NoError(t, b2.SetRepr(b.Repr()))
141+
require.NoError(t, b2.Commit(nil))
142+
require.NoError(t, b.Close())
143+
},
144+
func() {
145+
b := d.NewBatch()
146+
b2 := d.NewBatch()
147+
require.NoError(t, b.DeleteRange([]byte("a"), []byte("z"), nil))
148+
require.NoError(t, b2.Apply(b, nil))
149+
require.NoError(t, b2.Commit(nil))
150+
require.NoError(t, b.Close())
151+
},
152+
func() {
153+
require.NoError(t, d.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
154+
},
155+
func() {
156+
require.NoError(t, d.RangeKeyUnset([]byte("a"), []byte("z"), nil, nil))
157+
},
158+
func() {
159+
require.NoError(t, d.RangeKeyDelete([]byte("a"), []byte("z"), nil))
160+
},
161+
func() {
162+
b := d.NewBatch()
163+
require.NoError(t, b.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
164+
require.NoError(t, b.Commit(nil))
165+
},
166+
func() {
167+
b := d.NewBatch()
168+
require.NoError(t, b.RangeKeyUnset([]byte("a"), []byte("z"), nil, nil))
169+
require.NoError(t, b.Commit(nil))
170+
},
171+
func() {
172+
b := d.NewBatch()
173+
require.NoError(t, b.RangeKeyDelete([]byte("a"), []byte("z"), nil))
174+
require.NoError(t, b.Commit(nil))
175+
},
176+
func() {
177+
b := d.NewBatch()
178+
b2 := d.NewBatch()
179+
require.NoError(t, b.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
180+
require.NoError(t, b2.SetRepr(b.Repr()))
181+
require.NoError(t, b2.Commit(nil))
182+
require.NoError(t, b.Close())
183+
},
184+
func() {
185+
b := d.NewBatch()
186+
b2 := d.NewBatch()
187+
require.NoError(t, b.RangeKeySet([]byte("a"), []byte("z"), nil, nil, nil))
188+
require.NoError(t, b2.Apply(b, nil))
189+
require.NoError(t, b2.Commit(nil))
190+
require.NoError(t, b.Close())
191+
},
192+
}
191193

192-
for _, f := range cases {
193-
d.mu.Lock()
194-
flushed := d.mu.mem.queue[len(d.mu.mem.queue)-1].flushed
195-
d.mu.Unlock()
196-
f()
197-
<-flushed
198-
}
199-
require.NoError(t, d.Close())
194+
for _, f := range cases {
195+
d.mu.Lock()
196+
flushed := d.mu.mem.queue[len(d.mu.mem.queue)-1].flushed
197+
d.mu.Unlock()
198+
f()
199+
<-flushed
200+
}
201+
require.NoError(t, d.Close())
202+
})
200203
}
201204

202205
func TestFlushDelayStress(t *testing.T) {
203-
rng := rand.New(rand.NewPCG(0, uint64(time.Now().UnixNano())))
204-
opts := &Options{
205-
FS: vfs.NewMem(),
206-
Comparer: testkeys.Comparer,
207-
FlushDelayDeleteRange: time.Duration(rng.IntN(10)+1) * time.Millisecond,
208-
FlushDelayRangeKey: time.Duration(rng.IntN(10)+1) * time.Millisecond,
209-
FormatMajorVersion: internalFormatNewest,
210-
MemTableSize: 8192,
211-
Logger: testutils.Logger{T: t},
212-
}
213-
214-
runs := 100
215-
if buildtags.SlowBuild {
216-
runs = 5
217-
}
218-
for run := 0; run < runs; run++ {
219-
d, err := Open("", opts)
220-
require.NoError(t, err)
206+
synctest.Test(t, func(t *testing.T) {
207+
rng := rand.New(rand.NewPCG(0, uint64(time.Now().UnixNano())))
208+
opts := &Options{
209+
FS: vfs.NewMem(),
210+
Comparer: testkeys.Comparer,
211+
FlushDelayDeleteRange: time.Duration(rng.IntN(10)+1) * time.Millisecond,
212+
FlushDelayRangeKey: time.Duration(rng.IntN(10)+1) * time.Millisecond,
213+
FormatMajorVersion: internalFormatNewest,
214+
MemTableSize: 8192,
215+
Logger: testutils.Logger{T: t},
216+
}
221217

222-
now := time.Now().UnixNano()
223-
writers := runtime.GOMAXPROCS(0)
224-
var wg sync.WaitGroup
225-
for i := range writers {
226-
rng := rand.New(rand.NewPCG(0, uint64(now)+uint64(i)))
227-
wg.Go(func() {
228-
const ops = 100
229-
230-
var k1, k2 [32]byte
231-
for range ops {
232-
switch rng.IntN(3) {
233-
case 0:
234-
randStr(k1[:], rng)
235-
randStr(k2[:], rng)
236-
require.NoError(t, d.DeleteRange(k1[:], k2[:], nil))
237-
case 1:
238-
randStr(k1[:], rng)
239-
randStr(k2[:], rng)
240-
require.NoError(t, d.RangeKeySet(k1[:], k2[:], []byte("@2"), nil, nil))
241-
case 2:
242-
randStr(k1[:], rng)
243-
randStr(k2[:], rng)
244-
require.NoError(t, d.Set(k1[:], k2[:], nil))
245-
default:
246-
panic("unreachable")
218+
runs := 100
219+
if buildtags.SlowBuild {
220+
runs = 5
221+
}
222+
for run := 0; run < runs; run++ {
223+
d, err := Open("", opts)
224+
require.NoError(t, err)
225+
226+
now := time.Now().UnixNano()
227+
writers := runtime.GOMAXPROCS(0)
228+
var wg sync.WaitGroup
229+
for i := range writers {
230+
rng := rand.New(rand.NewPCG(0, uint64(now)+uint64(i)))
231+
wg.Go(func() {
232+
const ops = 100
233+
234+
var k1, k2 [32]byte
235+
for range ops {
236+
switch rng.IntN(3) {
237+
case 0:
238+
randStr(k1[:], rng)
239+
randStr(k2[:], rng)
240+
require.NoError(t, d.DeleteRange(k1[:], k2[:], nil))
241+
case 1:
242+
randStr(k1[:], rng)
243+
randStr(k2[:], rng)
244+
require.NoError(t, d.RangeKeySet(k1[:], k2[:], []byte("@2"), nil, nil))
245+
case 2:
246+
randStr(k1[:], rng)
247+
randStr(k2[:], rng)
248+
require.NoError(t, d.Set(k1[:], k2[:], nil))
249+
default:
250+
panic("unreachable")
251+
}
247252
}
248-
}
249-
})
253+
})
254+
}
255+
time.Sleep(time.Duration(rng.IntN(10)+1) * time.Millisecond)
256+
wg.Wait()
257+
require.NoError(t, d.Close())
250258
}
251-
wg.Wait()
252-
time.Sleep(time.Duration(rng.IntN(10)+1) * time.Millisecond)
253-
require.NoError(t, d.Close())
254-
}
259+
})
255260
}
256261

257262
// Verify that range tombstones at higher levels do not unintentionally delete

0 commit comments

Comments (0)