6,535 changes: 6,535 additions & 0 deletions result

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ go test -v -run='TestPushValueLogLimit' --manual=true
# Run the special Truncate test.
rm -rf p
go test -v -run='TestTruncateVlogNoClose$' --manual=true
truncate --size=4096 p/000000.vlog
truncate --size=4096 p/000000.wal
go test -v -run='TestTruncateVlogNoClose2$' --manual=true
go test -v -run='TestTruncateVlogNoClose3$' --manual=true
rm -rf p
Expand Down
872 changes: 599 additions & 273 deletions value.go

Large diffs are not rendered by default.

115 changes: 86 additions & 29 deletions value_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import (
"github.com/dgraph-io/badger/v2/options"
"github.com/dgraph-io/badger/v2/y"
humanize "github.com/dustin/go-humanize"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/trace"
)
Expand Down Expand Up @@ -95,7 +96,12 @@ func TestValueBasic(t *testing.T) {
offset: b.Ptrs[1].Offset,
},
}, readEntries)

vlogFiles, err := getSuffixedFiles(dir, vlogSuffix)
require.NoError(t, err)
require.Equal(t, 1, len(vlogFiles))
walFiles, err := getSuffixedFiles(dir, walSuffix)
require.NoError(t, err)
assert.LessOrEqual(t, len(walFiles), 1)
}

func TestValueGCManaged(t *testing.T) {
Expand Down Expand Up @@ -184,9 +190,9 @@ func TestValueGC(t *testing.T) {
txnDelete(t, kv, []byte(fmt.Sprintf("key%d", i)))
}

kv.vlog.filesLock.RLock()
lf := kv.vlog.filesMap[kv.vlog.sortedFids()[0]]
kv.vlog.filesLock.RUnlock()
kv.vlog.vlog.filesLock.RLock()
lf := kv.vlog.vlog.filesMap[kv.vlog.vlog.sortedFids()[0]]
kv.vlog.vlog.filesLock.RUnlock()

// lf.iterate(0, func(e Entry) bool {
// e.print("lf")
Expand Down Expand Up @@ -242,9 +248,9 @@ func TestValueGC2(t *testing.T) {
txnSet(t, kv, []byte(fmt.Sprintf("key%d", i)), v, 0)
}

kv.vlog.filesLock.RLock()
lf := kv.vlog.filesMap[kv.vlog.sortedFids()[0]]
kv.vlog.filesLock.RUnlock()
kv.vlog.vlog.filesLock.RLock()
lf := kv.vlog.vlog.filesMap[kv.vlog.vlog.sortedFids()[0]]
kv.vlog.vlog.filesLock.RUnlock()

// lf.iterate(0, func(e Entry) bool {
// e.print("lf")
Expand Down Expand Up @@ -344,9 +350,9 @@ func TestValueGC3(t *testing.T) {

// Like other tests, we pull out a logFile to rewrite it directly

kv.vlog.filesLock.RLock()
logFile := kv.vlog.filesMap[kv.vlog.sortedFids()[0]]
kv.vlog.filesLock.RUnlock()
kv.vlog.vlog.filesLock.RLock()
logFile := kv.vlog.vlog.filesMap[kv.vlog.vlog.sortedFids()[0]]
kv.vlog.vlog.filesLock.RUnlock()

tr := trace.New("Test", "Test")
defer tr.Finish()
Expand Down Expand Up @@ -394,10 +400,10 @@ func TestValueGC4(t *testing.T) {
txnSet(t, kv, []byte(fmt.Sprintf("key%d", i)), v, 0)
}

kv.vlog.filesLock.RLock()
lf0 := kv.vlog.filesMap[kv.vlog.sortedFids()[0]]
lf1 := kv.vlog.filesMap[kv.vlog.sortedFids()[1]]
kv.vlog.filesLock.RUnlock()
kv.vlog.vlog.filesLock.RLock()
lf0 := kv.vlog.vlog.filesMap[kv.vlog.vlog.sortedFids()[0]]
lf1 := kv.vlog.vlog.filesMap[kv.vlog.vlog.sortedFids()[1]]
kv.vlog.vlog.filesLock.RUnlock()

// lf.iterate(0, func(e Entry) bool {
// e.print("lf")
Expand Down Expand Up @@ -535,7 +541,9 @@ func TestChecksums(t *testing.T) {
{Key: k2, Value: v2},
})
buf[len(buf)-1]++ // Corrupt last byte
// In this case, the contents of both the wal and the vlog will be the same.
require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 0), buf, 0777))
require.NoError(t, ioutil.WriteFile(walFilePath(dir, 0), buf, 0777))

// K1 should exist, but K2 shouldn't.
kv, err = Open(opts)
Expand Down Expand Up @@ -620,6 +628,8 @@ func TestPartialAppendToValueLog(t *testing.T) {
{Key: k2, Value: v2},
})
buf = buf[:len(buf)-6]

require.NoError(t, ioutil.WriteFile(walFilePath(dir, 0), buf, 0777))
require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 0), buf, 0777))

// Badger should now start up
Expand Down Expand Up @@ -688,7 +698,7 @@ func TestReadOnlyOpenWithPartialAppendToValueLog(t *testing.T) {
{Key: k2, Value: v2},
})
buf = buf[:len(buf)-6]
require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 0), buf, 0777))
require.NoError(t, ioutil.WriteFile(walFilePath(dir, 0), buf, 0777))

opts.ReadOnly = true
// Badger should fail a read-only open with values to replay
Expand Down Expand Up @@ -752,7 +762,7 @@ func createVlog(t *testing.T, entries []*Entry) []byte {
require.NoError(t, txn.Commit())
require.NoError(t, kv.Close())

filename := vlogFilePath(dir, 0)
filename := walFilePath(dir, 0)
buf, err := ioutil.ReadFile(filename)
require.NoError(t, err)
return buf
Expand All @@ -777,7 +787,7 @@ func TestPenultimateLogCorruption(t *testing.T) {
h.readRange(0, 7)

for i := 2; i >= 0; i-- {
fpath := vlogFilePath(dir, uint32(i))
fpath := walFilePath(dir, uint32(i))
fi, err := os.Stat(fpath)
require.NoError(t, err)
require.True(t, fi.Size() > 0, "Empty file at log=%d", i)
Expand Down Expand Up @@ -980,22 +990,22 @@ func TestValueLogTruncate(t *testing.T) {
return txn.Set([]byte("foo"), nil)
}))

fileCountBeforeCorruption := len(db.vlog.filesMap)
fileCountBeforeCorruption := len(db.vlog.wal.filesMap)

require.NoError(t, db.Close())

// Create two wal files with corrupted data. These will be truncated when the DB starts next time.
require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 1), []byte("foo"), 0664))
require.NoError(t, ioutil.WriteFile(vlogFilePath(dir, 2), []byte("foo"), 0664))
require.NoError(t, ioutil.WriteFile(walFilePath(dir, 1), []byte("foo"), 0664))
require.NoError(t, ioutil.WriteFile(walFilePath(dir, 2), []byte("foo"), 0664))

db, err = Open(DefaultOptions(dir).WithTruncate(true))
require.NoError(t, err)

// Ensure the wal file with id=1 is not present.
require.Nil(t, db.vlog.filesMap[1])
require.Nil(t, db.vlog.wal.filesMap[1])

// Ensure filesize of fid=2 is zero
zeroFile, ok := db.vlog.filesMap[2]
zeroFile, ok := db.vlog.wal.filesMap[2]
require.True(t, ok)
fileStat, err := zeroFile.fd.Stat()
require.NoError(t, err)
Expand All @@ -1006,14 +1016,14 @@ func TestValueLogTruncate(t *testing.T) {
if runtime.GOOS == "windows" {
require.Equal(t, 2*db.opt.ValueLogFileSize, fileStat.Size())
} else {
require.Equal(t, int64(vlogHeaderSize), fileStat.Size())
require.Equal(t, int64(lfHeaderSize), fileStat.Size())
}
fileCountAfterCorruption := len(db.vlog.filesMap)
fileCountAfterCorruption := len(db.vlog.wal.filesMap)
// +1 because the file with id=2 will be completely truncated. It won't be deleted.
// There would be two files. fid=0 with valid data, fid=2 with zero data (truncated).
require.Equal(t, fileCountBeforeCorruption+1, fileCountAfterCorruption)
// The max file ID should point to the last wal file, which is fid=2 in this case.
require.Equal(t, 2, int(db.vlog.maxFid))
require.Equal(t, 2, int(db.vlog.wal.maxFid))
require.NoError(t, db.Close())
}

Expand Down Expand Up @@ -1073,6 +1083,7 @@ func TestSafeEntry(t *testing.T) {
func TestDiscardStatsMove(t *testing.T) {
dir, err := ioutil.TempDir("", "badger-test")
require.NoError(t, err)
defer os.Remove(dir)
ops := getTestOptions(dir)
ops.ValueLogMaxEntries = 1
db, err := Open(ops)
Expand Down Expand Up @@ -1105,18 +1116,20 @@ func TestDiscardStatsMove(t *testing.T) {
db.vlog.lfDiscardStats.Unlock()

// Push more entries so that we get more than 1 value log files.
val := make([]byte, ops.ValueThreshold)
rand.Read(val)
require.NoError(t, db.Update(func(txn *Txn) error {
e := NewEntry([]byte("f"), []byte("1"))
e := NewEntry([]byte("f"), val)
return txn.SetEntry(e)
}))
require.NoError(t, db.Update(func(txn *Txn) error {
e := NewEntry([]byte("ff"), []byte("1"))
e := NewEntry([]byte("ff"), val)
return txn.SetEntry(e)
}))

tr := trace.New("Badger.ValueLog", "GC")
// Use first value log file for GC. This value log file contains the discard stats.
lf := db.vlog.filesMap[0]
lf := db.vlog.vlog.filesMap[0]
require.NoError(t, db.vlog.rewrite(lf, tr))
require.NoError(t, db.Close())

Expand Down Expand Up @@ -1211,7 +1224,7 @@ func TestValueEntryChecksum(t *testing.T) {
require.Greater(t, len(v), db.opt.ValueThreshold)
txnSet(t, db, k, v, 0)

path := db.vlog.fpath(0)
path := db.vlog.fpath(0, vlogFile)
require.NoError(t, db.Close())

file, err := os.OpenFile(path, os.O_RDWR, 0644)
Expand Down Expand Up @@ -1301,3 +1314,47 @@ func TestValidateWrite(t *testing.T) {
err = log.validateWrites([]*request{req1, req})
require.Error(t, err)
}

// TestCheckNumberOfEntries verifies that, after committing N entries large
// enough to be value-log-resident, exactly one vlog file and one wal file
// exist, that the vlog holds exactly the N data entries, and that the wal
// additionally holds the transaction markers.
func TestCheckNumberOfEntries(t *testing.T) {
	dir, err := ioutil.TempDir("", "badger-test")
	require.NoError(t, err)
	defer removeDir(dir)
	opt := getTestOptions(dir)

	kv, err := Open(opt)
	require.NoError(t, err)
	defer func() { require.NoError(t, kv.Close()) }()

	N := 10
	// 32 KiB values, large enough to always be stored in the value log.
	sz := 32 << 10
	v := make([]byte, sz)
	rand.Read(v)
	txn := kv.NewTransaction(true)
	for i := 0; i < N; i++ {
		require.NoError(t, txn.SetEntry(NewEntry([]byte(fmt.Sprintf("key%d", i)), v)))
		// With N=10 this fires only at i=0, splitting the writes into two
		// commits (key0, then key1..key9) and hence two transaction markers.
		if i%20 == 0 {
			require.NoError(t, txn.Commit())
			txn = kv.NewTransaction(true)
		}
	}
	require.NoError(t, txn.Commit())

	// countEntries returns the number of entries in the first (lowest-fid)
	// log file tracked by the given wrapper.
	countEntries := func(lw *logWrapper) int {
		lw.filesLock.RLock()
		lf := lw.filesMap[lw.sortedFids()[0]]
		lw.filesLock.RUnlock()

		var count int
		// NOTE(review): iterate's return value(s) are discarded here, as in
		// the original — confirm its signature and check the error if any.
		kv.vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error {
			count++
			return nil
		})
		return count
	}
	// All entries go into a single file of each kind.
	require.Equal(t, 1, len(kv.vlog.vlog.filesMap))
	require.Equal(t, 1, len(kv.vlog.wal.filesMap))
	// The wal contains 2 extra entries, which are the transaction markers.
	require.Equal(t, N, countEntries(&kv.vlog.vlog))
	require.Equal(t, N+2, countEntries(&kv.vlog.wal))
}