Skip to content

Commit

Permalink
[FAB-6645] update LTE to add JSON to CouchDB tests
Browse files Browse the repository at this point in the history
The CouchDB benchmarks currently use binary values only.
This change will provide the option for benchmarking CouchDB
with JSON values.

Change-Id: I9f49331b80607fda0ac8b45790ed3bd4beb4b936
Signed-off-by: Chris Elder <chris.elder@us.ibm.com>
  • Loading branch information
Chris Elder committed Dec 6, 2017
1 parent d79520f commit ecd85b4
Show file tree
Hide file tree
Showing 9 changed files with 166 additions and 34 deletions.
8 changes: 7 additions & 1 deletion test/tools/LTE/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -111,13 +111,19 @@ By default, the tests use golveldb as the state database. Fabric provides the
option of using CouchDB as a pluggable state database. To run the existing
tests with CouchDB, use the parameter file `parameters_couchdb_daily_CI.sh`:
```
./runbenchmarks.sh -f parameters_couchdb_daily_CI.sh all
```
Note that this parameter file (`parameters_couchdb_daily_CI.sh`) contains the
following line, which is required to run the tests with CouchDB:
```
export useCouchDB="yes"
```
CouchDB can store values in JSON or binary formats. The following option in
`parameters_couchdb_daily_CI.sh` is used to switch between JSON and binary
values:
```
UseJSONFormat="true"
```

## How to View the Test Results

Expand Down
30 changes: 21 additions & 9 deletions test/tools/LTE/experiments/conf.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,19 +15,24 @@ import (
// txConf captures the transaction related configurations
// numTotalTxs specifies the total transactions that should be executed and committed across chains
// numParallelTxsPerChain specifies the parallel transactions on each of the chains
// numWritesPerTx specifies the number of keys to write in each transaction
// numReadsPerTx specifies the number of keys to read in each transaction. Note: this parameter
// should match numWritesPerTx for normal benchmarks; it can be set to zero to make batch update measurements.
type txConf struct {
	numTotalTxs            int
	numParallelTxsPerChain int
	numWritesPerTx         int
	numReadsPerTx          int
}

// dataConf captures the data related configurations
// numKVs specifies number of total key-values across chains
// kvSize specifies the size of a key-value (in bytes)
// useJSON specifies if the value stored is in JSON format
type dataConf struct {
	numKVs  int
	kvSize  int
	useJSON bool
}

// configuration captures all the configurations for an experiment
Expand All @@ -44,8 +49,8 @@ func defaultConf() *configuration {
conf := &configuration{}
conf.chainMgrConf = &chainmgmt.ChainMgrConf{DataDir: "/tmp/fabric/ledgerPerfTests", NumChains: 1}
conf.batchConf = &chainmgmt.BatchConf{BatchSize: 10, SignBlock: false}
conf.txConf = &txConf{numTotalTxs: 100000, numParallelTxsPerChain: 100, numKeysInEachTx: 4}
conf.dataConf = &dataConf{numKVs: 100000, kvSize: 200}
conf.txConf = &txConf{numTotalTxs: 100000, numParallelTxsPerChain: 100, numWritesPerTx: 4, numReadsPerTx: 4}
conf.dataConf = &dataConf{numKVs: 100000, kvSize: 200, useJSON: false}
return conf
}

Expand Down Expand Up @@ -76,8 +81,11 @@ func confFromTestParams(testParams []string) *configuration {
numTotalTxs := flags.Int("NumTotalTx",
conf.txConf.numTotalTxs, "Number of total transactions")

numKeysInEachTx := flags.Int("NumKeysInEachTx",
conf.txConf.numKeysInEachTx, "number of keys operated upon in each Tx")
numWritesPerTx := flags.Int("NumWritesPerTx",
conf.txConf.numWritesPerTx, "number of keys written in each Tx")

numReadsPerTx := flags.Int("NumReadsPerTx",
conf.txConf.numReadsPerTx, "number of keys to read in each Tx")

// batchConf
batchSize := flags.Int("BatchSize",
Expand All @@ -90,15 +98,19 @@ func confFromTestParams(testParams []string) *configuration {
kvSize := flags.Int("KVSize",
conf.dataConf.kvSize, "size of the key-value in bytes")

useJSON := flags.Bool("UseJSONFormat", conf.dataConf.useJSON, "should CouchDB use JSON for values")

flags.Parse(testParams)

conf.chainMgrConf.DataDir = *dataDir
conf.chainMgrConf.NumChains = *numChains
conf.txConf.numParallelTxsPerChain = *numParallelTxsPerChain
conf.txConf.numTotalTxs = *numTotalTxs
conf.txConf.numKeysInEachTx = *numKeysInEachTx
conf.txConf.numWritesPerTx = *numWritesPerTx
conf.txConf.numReadsPerTx = *numReadsPerTx
conf.batchConf.BatchSize = *batchSize
conf.dataConf.numKVs = *numKVs
conf.dataConf.kvSize = *kvSize
conf.dataConf.useJSON = *useJSON
return conf
}
14 changes: 10 additions & 4 deletions test/tools/LTE/experiments/insert_txs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,16 +65,22 @@ func runInsertClientsForChain(chain *chainmgmt.Chain) {
}

func runInsertClient(chain *chainmgmt.Chain, startKey, endKey int, wg *sync.WaitGroup) {
numKeysPerTx := conf.txConf.numKeysInEachTx
numWritesPerTx := conf.txConf.numWritesPerTx
kvSize := conf.dataConf.kvSize
useJSON := conf.dataConf.useJSON

currentKey := startKey
for currentKey <= endKey {
simulator, err := chain.NewTxSimulator(util.GenerateUUID())
common.PanicOnError(err)
for i := 0; i < numKeysPerTx; i++ {
common.PanicOnError(simulator.SetState(
chaincodeName, constructKey(currentKey), constructValue(currentKey, kvSize)))
for i := 0; i < numWritesPerTx; i++ {
if useJSON {
common.PanicOnError(simulator.SetState(
chaincodeName, constructKey(currentKey), constructJSONValue(currentKey, kvSize)))
} else {
common.PanicOnError(simulator.SetState(
chaincodeName, constructKey(currentKey), constructValue(currentKey, kvSize)))
}
currentKey++
if currentKey > endKey {
break
Expand Down
31 changes: 25 additions & 6 deletions test/tools/LTE/experiments/readwrite_txs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,19 +66,38 @@ func runReadWriteClientsForChain(chain *chainmgmt.Chain) {
}

func runReadWriteClient(chain *chainmgmt.Chain, rand *rand.Rand, numTx int, wg *sync.WaitGroup) {
numKeysPerTx := conf.txConf.numKeysInEachTx
numWritesPerTx := conf.txConf.numWritesPerTx
numReadsPerTx := conf.txConf.numReadsPerTx
maxKeyNumber := calculateShare(conf.dataConf.numKVs, conf.chainMgrConf.NumChains, int(chain.ID))
kvSize := conf.dataConf.kvSize
useJSON := conf.dataConf.useJSON
var value []byte

for i := 0; i < numTx; i++ {
simulator, err := chain.NewTxSimulator(util.GenerateUUID())
common.PanicOnError(err)
for i := 0; i < numKeysPerTx; i++ {
for i := 0; i < numWritesPerTx; i++ {
keyNumber := rand.Intn(maxKeyNumber)
key := constructKey(keyNumber)
value, err := simulator.GetState(chaincodeName, key)
common.PanicOnError(err)
if !verifyValue(keyNumber, value) {
panic(fmt.Errorf("Value %s is not expected for key number %d", value, keyNumber))
// check to see if the number of reads is exceeded
if i < numReadsPerTx-1 {
value, err = simulator.GetState(chaincodeName, key)
common.PanicOnError(err)
if useJSON {
if !verifyJSONValue(keyNumber, value) {
panic(fmt.Errorf("Value %s is not expected for key number %d", value, keyNumber))
}
} else {
if !verifyValue(keyNumber, value) {
panic(fmt.Errorf("Value %s is not expected for key number %d", value, keyNumber))
}
}
} else {
if useJSON {
value = []byte(constructJSONValue(keyNumber, kvSize))
} else {
value = []byte(constructValue(keyNumber, kvSize))
}
}
common.PanicOnError(simulator.SetState(chaincodeName, key, value))
}
Expand Down
86 changes: 86 additions & 0 deletions test/tools/LTE/experiments/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,24 +18,90 @@ package experiments

import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
"strconv"

logging "github.com/op/go-logging"
)

var logger = logging.MustGetLogger("experiments")

// marbleRecord is the document shape used when storing JSON values in the
// benchmarks. ID and Rev map to CouchDB's internal "_id"/"_rev" fields;
// Prefix carries the deterministic per-key value prefix used by verification,
// and DataPadding is filler used to grow the encoded document toward a
// requested size. Field order determines JSON output order — do not reorder.
type marbleRecord struct {
ID string `json:"_id,omitempty"`
Rev string `json:"_rev,omitempty"`
Prefix string `json:"prefix,omitempty"`
AssetType string `json:"asset_type,omitempty"`
AssetName string `json:"asset_name,omitempty"`
Color string `json:"color,omitempty"`
Size int `json:"size,omitempty"`
Owner string `json:"owner,omitempty"`
DataPadding string `json:"datapadding,omitempty"`
}

// colors is the pool of marble colors sampled when generating JSON test records.
var colors = []string{
"red",
"green",
"purple",
"yellow",
"white",
"black",
}

// owners is the pool of marble owners sampled when generating JSON test records.
var owners = []string{
"fred",
"jerry",
"tom",
"alice",
"kim",
"angela",
"john",
}

// TestValue is a struct for holding a single test value string.
type TestValue struct {
Value string
}

// constructKey derives the deterministic ledger key for a key number,
// e.g. 7 -> "key_000000007".
func constructKey(keyNumber int) string {
	return "key_" + fmt.Sprintf("%09d", keyNumber)
}

// constructValue builds a binary test value of kvSize bytes: a deterministic
// prefix identifying the key number, followed by random padding.
func constructValue(keyNumber int, kvSize int) []byte {
	value := constructValuePrefix(keyNumber)
	return append(value, constructRandomBytes(kvSize-len(value))...)
}

// constructJSONValue builds a JSON-encoded marble record for the given key
// number, padded with random data so the encoded value reaches at least
// kvSize bytes. Field values (color, size, owner) are derived
// deterministically from keyNumber. Note: the final value may exceed kvSize
// because the padding is sized before the "datapadding" JSON field overhead
// is added.
func constructJSONValue(keyNumber int, kvSize int) []byte {
	prefix := constructValuePrefix(keyNumber)

	// Use a local RNG seeded from the key so the record fields are
	// deterministic per key without reseeding the shared global source
	// (which would race with other goroutines and perturb unrelated callers).
	r := rand.New(rand.NewSource(int64(keyNumber)))
	color := colors[r.Intn(len(colors))]
	// NOTE(review): size is bounded by len(colors) — presumably intentional
	// to keep sizes in a small fixed range; confirm it wasn't meant to use an
	// independent bound.
	size := r.Intn(len(colors))*10 + 10
	owner := owners[r.Intn(len(owners))]
	assetName := "marble" + strconv.Itoa(keyNumber)

	testRecord := marbleRecord{Prefix: string(prefix), AssetType: "marble", AssetName: assetName, Color: color, Size: size, Owner: owner}

	jsonValue, err := json.Marshal(testRecord)
	if err != nil {
		// marbleRecord contains only strings and ints, so this cannot fail;
		// panic loudly rather than silently storing an empty value.
		panic(err)
	}

	if kvSize > len(jsonValue) {
		// Add padding so the encoded value grows to the requested size.
		testRecord.DataPadding = string(constructRandomBytes(kvSize - len(jsonValue)))
		if jsonValue, err = json.Marshal(testRecord); err != nil {
			panic(err)
		}
	}

	return jsonValue
}

// constructValuePrefix derives the deterministic value prefix for a key
// number, e.g. 42 -> "value_000000042".
func constructValuePrefix(keyNumber int) []byte {
	prefix := "value_" + fmt.Sprintf("%09d", keyNumber)
	return []byte(prefix)
}
Expand All @@ -45,7 +111,27 @@ func verifyValue(keyNumber int, value []byte) bool {
if len(value) < len(prefix) {
return false
}

return bytes.Equal(value[:len(prefix)], prefix)

}

// verifyJSONValue reports whether value is a JSON marble record whose Prefix
// field matches the deterministic prefix for keyNumber.
func verifyJSONValue(keyNumber int, value []byte) bool {
	prefix := constructValuePrefix(keyNumber)
	if len(value) < len(prefix) {
		return false
	}

	var marble marbleRecord
	// A malformed document cannot match; the original ignored this error and
	// relied on the zero-value Prefix failing the comparison below.
	if err := json.Unmarshal(value, &marble); err != nil {
		return false
	}

	return bytes.Equal([]byte(marble.Prefix), prefix)
}

func disableLogging() {
Expand Down
4 changes: 2 additions & 2 deletions test/tools/LTE/scripts/benchmarks.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ source ./common.sh
PKG_NAME="github.com/hyperledger/fabric/test/tools/LTE/experiments"

# setCommonTestParams assembles the benchmark parameter list shared by all
# tests and the set of result directories whose sizes are reported.
function setCommonTestParams {
    TEST_PARAMS="-DataDir=$DataDir, -NumChains=$NumChains, -NumParallelTxPerChain=$NumParallelTxPerChain, -NumWritesPerTx=$NumWritesPerTx, -NumReadsPerTx=$NumReadsPerTx, -BatchSize=$BatchSize, -NumKVs=$NumKVs, -KVSize=$KVSize, -UseJSONFormat=$UseJSONFormat"
    RESULTANT_DIRS="$DataDir/ledgersData/chains/chains $DataDir/ledgersData/chains/index $DataDir/ledgersData/stateLeveldb $DataDir/ledgersData/historyLeveldb"
}

Expand All @@ -36,7 +36,7 @@ function runReadWriteTxs {
FUNCTION_NAME="BenchmarkReadWriteTxs"
if [ "$CLEAR_OS_CACHE" == "true" ]; then
clearOSCache
fi
fi
setCommonTestParams
TEST_PARAMS="$TEST_PARAMS, -NumTotalTx=$NumTotalTx"
executeTest
Expand Down
8 changes: 5 additions & 3 deletions test/tools/LTE/scripts/parameters_couchdb_daily_CI.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,14 @@
#

export useCouchDB="yes"
UseJSONFormat="true"
DataDir="/tmp/fabric/test/tools/LTE/data"
NumChains=10
NumParallelTxPerChain=10
NumKVs=10000
NumTotalTx=10000
NumKeysInEachTx=4
NumWritesPerTx=4
NumReadsPerTx=4
BatchSize=50
KVSize=200

Expand All @@ -25,12 +27,12 @@ KVSize=200
# NumParallelTxPerChain=10
# NumKVs=10000
# NumTotalTx=10000
# NumWritesPerTx=4
# BatchSize=50
# KVSize=200
ArrayNumParallelTxPerChain=(1 5 10 20 50 100)
ArrayNumChains=(1 5 10 20 50)
ArrayNumKeysInEachTx=(1 2 5 10 20)
ArrayNumWritesPerTx=(1 2 5 10 20)
ArrayKVSize=(100 200 500 1000 2000)
ArrayBatchSize=(10 20 100 500)
ArrayNumParallelTxWithSingleChain=(1 5 10 20 50 100)
Expand Down
5 changes: 3 additions & 2 deletions test/tools/LTE/scripts/parameters_daily_CI.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@ NumChains=10
NumParallelTxPerChain=10
NumKVs=100000
NumTotalTx=100000
NumKeysInEachTx=4
NumWritesPerTx=4
NumReadsPerTx=4
BatchSize=50
KVSize=200

Expand All @@ -28,7 +29,7 @@ KVSize=200
# KVSize=200
ArrayNumParallelTxPerChain=(1 5 10 20 50 100 500 2000)
ArrayNumChains=(1 5 10 20 50 100 500 2000)
ArrayNumKeysInEachTx=(1 2 5 10 20)
ArrayNumWritesPerTx=(1 2 5 10 20)
ArrayKVSize=(100 200 500 1000 2000)
ArrayBatchSize=(10 20 100 500)
ArrayNumParallelTxWithSingleChain=(1 5 10 20 50 100 500 2000)
Expand Down
14 changes: 7 additions & 7 deletions test/tools/LTE/scripts/runbenchmarks.sh
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@ function varyNumChains {
done
}

# varyNumWritesPerTx runs the insert and read/write benchmarks once for each
# per-transaction write count in ArrayNumWritesPerTx, starting each run from a
# clean DataDir.
function varyNumWritesPerTx {
  for v in "${ArrayNumWritesPerTx[@]}"
  do
    NumWritesPerTx=$v
    rm -rf $DataDir;runInsertTxs;runReadWriteTxs
  done
}
Expand Down Expand Up @@ -113,7 +113,7 @@ varyNumParallelTxPerChain
varyNumChains
varyNumParallelTxWithSingleChain
varyNumChainsWithNoParallelism
varyNumKeysInEachTx
varyNumWritesPerTx
varyKVSize
varyBatchSize
varyNumTxs
Expand Down Expand Up @@ -158,8 +158,8 @@ case $1 in
varyNumParallelTxWithSingleChain ;;
varyNumChainsWithNoParallelism)
varyNumChainsWithNoParallelism ;;
varyNumKeysInEachTx)
varyNumKeysInEachTx ;;
varyNumWritesPerTx)
varyNumWritesPerTx ;;
varyKVSize)
varyKVSize ;;
varyBatchSize)
Expand All @@ -176,7 +176,7 @@ case $1 in
varyNumChains
varyNumParallelTxWithSingleChain
varyNumChainsWithNoParallelism
varyNumKeysInEachTx
varyNumWritesPerTx
varyKVSize
varyBatchSize
varyNumTxs
Expand Down

0 comments on commit ecd85b4

Please sign in to comment.