diff --git a/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb.go b/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb.go
index 4c0cabe852a..e5b59dd29c8 100644
--- a/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb.go
+++ b/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb.go
@@ -21,7 +21,7 @@ import (
 	"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
 	"github.com/hyperledger/fabric/core/ledger"
 	"github.com/hyperledger/fabric/core/ledger/kvledger/history/historydb"
-	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwset"
+	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
 	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
 	"github.com/hyperledger/fabric/core/ledger/ledgerconfig"
 	"github.com/hyperledger/fabric/core/ledger/util"
@@ -137,19 +137,19 @@ func (historyDB *historyDB) Commit(block *common.Block) error {
 			}
 			//preparation for extracting RWSet from transaction
-			txRWSet := &rwset.TxReadWriteSet{}
+			txRWSet := &rwsetutil.TxRwSet{}

 			// Get the Result from the Action and then Unmarshal
 			// it into a TxReadWriteSet using custom unmarshalling
-			if err = txRWSet.Unmarshal(respPayload.Results); err != nil {
+			if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
 				return err
 			}

 			// for each transaction, loop through the namespaces and writesets
 			// and add a history record for each write
-			for _, nsRWSet := range txRWSet.NsRWs {
+			for _, nsRWSet := range txRWSet.NsRwSets {
 				ns := nsRWSet.NameSpace

-				for _, kvWrite := range nsRWSet.Writes {
+				for _, kvWrite := range nsRWSet.KvRwSet.Writes {
 					writeKey := kvWrite.Key

 					//composite key for history records is in the form ns~key~blockNo~tranNo
diff --git a/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb_query_executer.go b/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb_query_executer.go
index 6eced56acd7..2e2ebec61c3 100644
--- a/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb_query_executer.go
+++ b/core/ledger/kvledger/history/historydb/historyleveldb/historyleveldb_query_executer.go
@@ -24,7 +24,7 @@ import (
 	"github.com/hyperledger/fabric/common/ledger/util"
 	"github.com/hyperledger/fabric/core/ledger"
 	"github.com/hyperledger/fabric/core/ledger/kvledger/history/historydb"
-	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwset"
+	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
 	"github.com/hyperledger/fabric/core/ledger/ledgerconfig"
 	"github.com/hyperledger/fabric/protos/common"
 	putils "github.com/hyperledger/fabric/protos/utils"
@@ -129,19 +129,19 @@ func getTxIDandKeyWriteValueFromTran(
 	txID := chdr.TxId

-	txRWSet := &rwset.TxReadWriteSet{}
+	txRWSet := &rwsetutil.TxRwSet{}

 	// Get the Result from the Action and then Unmarshal
 	// it into a TxReadWriteSet using custom unmarshalling
-	if err = txRWSet.Unmarshal(respPayload.Results); err != nil {
+	if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
 		return txID, nil, err
 	}

 	// look for the namespace and key by looping through the transaction's ReadWriteSets
-	for _, nsRWSet := range txRWSet.NsRWs {
+	for _, nsRWSet := range txRWSet.NsRwSets {
 		if nsRWSet.NameSpace == namespace {
 			// got the correct namespace, now find the key write
-			for _, kvWrite := range nsRWSet.Writes {
+			for _, kvWrite := range nsRWSet.KvRwSet.Writes {
 				if kvWrite.Key == key {
 					return txID, kvWrite.Value, nil
 				}
diff --git a/core/ledger/kvledger/txmgmt/rwset/rwset.go
b/core/ledger/kvledger/txmgmt/rwset/rwset.go deleted file mode 100644 index a39e8eb1e78..00000000000 --- a/core/ledger/kvledger/txmgmt/rwset/rwset.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rwset - -import ( - "bytes" - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" -) - -// KVRead - a tuple of key and its version at the time of transaction simulation -type KVRead struct { - Key string - Version *version.Height -} - -// NewKVRead constructs a new `KVRead` -func NewKVRead(key string, version *version.Height) *KVRead { - return &KVRead{key, version} -} - -// KVWrite - a tuple of key and it's value that a transaction wants to set during simulation. -// In addition, IsDelete is set to true iff the operation performed on the key is a delete operation -type KVWrite struct { - Key string - IsDelete bool - Value []byte -} - -// NewKVWrite constructs a new `KVWrite` -func NewKVWrite(key string, value []byte) *KVWrite { - return &KVWrite{key, value == nil, value} -} - -// SetValue sets the new value for the key -func (w *KVWrite) SetValue(value []byte) { - w.Value = value - w.IsDelete = value == nil -} - -// RangeQueryInfo captures a range query executed by a transaction -// and the tuples that are read by the transaction -// This it to be used to perform a phantom-read validation during commit -type RangeQueryInfo struct { - StartKey string - EndKey string - ItrExhausted bool - Results []*KVRead - ResultHash *MerkleSummary -} - -// MerkleTreeLevel used for representing a level of the merkle tree -type MerkleTreeLevel int - -// Hash represents bytes of a hash -type Hash []byte - -// MerkleSummary encloses the summary of the merkle tree that consists of the hashes of the results of a range query. -// This allows to reduce the size of RWSet in the presence of range query results -// by storing certain hashes instead of actual results. 
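// Example (an illustrative sketch, not part of the original files; it assumes the rwsetutil and
// kvrwset types introduced later in this diff): the hand-rolled Marshal/Unmarshal logic in this
// deleted file is replaced by plain protobuf serialization, roughly (error handling elided):
//
//	txRwSet := &rwsetutil.TxRwSet{NsRwSets: []*rwsetutil.NsRwSet{
//		{NameSpace: "ns1", KvRwSet: &kvrwset.KVRWSet{
//			Reads:  []*kvrwset.KVRead{rwsetutil.NewKVRead("key1", version.NewHeight(1, 1))},
//			Writes: []*kvrwset.KVWrite{{Key: "key2", Value: []byte("value2")}},
//		}},
//	}}
//	protoBytes, err := txRwSet.ToProtoBytes() // wraps each marshalled KVRWSet in a rwset.TxReadWriteSet proto
//	decoded := &rwsetutil.TxRwSet{}
//	err = decoded.FromProtoBytes(protoBytes)  // rebuilds an equivalent TxRwSet from the proto bytes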
-type MerkleSummary struct { - MaxDegree int - MaxLevel MerkleTreeLevel - MaxLevelHashes []Hash -} - -// NsReadWriteSet - a collection of all the reads and writes that belong to a common namespace -type NsReadWriteSet struct { - NameSpace string - Reads []*KVRead - Writes []*KVWrite - RangeQueriesInfo []*RangeQueryInfo -} - -// TxReadWriteSet - a collection of all the reads and writes collected as a result of a transaction simulation -type TxReadWriteSet struct { - NsRWs []*NsReadWriteSet -} - -// Equal verifies whether the give MerkleSummary is equals to this -func (ms *MerkleSummary) Equal(anotherMS *MerkleSummary) bool { - if anotherMS == nil { - return false - } - if ms.MaxDegree != anotherMS.MaxDegree || - ms.MaxLevel != anotherMS.MaxLevel || - len(ms.MaxLevelHashes) != len(anotherMS.MaxLevelHashes) { - return false - } - for i := 0; i < len(ms.MaxLevelHashes); i++ { - if !bytes.Equal(ms.MaxLevelHashes[i], anotherMS.MaxLevelHashes[i]) { - return false - } - } - return true -} - -// Marshal serializes a `KVRead` -func (r *KVRead) Marshal(buf *proto.Buffer) error { - if err := buf.EncodeStringBytes(r.Key); err != nil { - return err - } - versionBytes := []byte{} - if r.Version != nil { - versionBytes = r.Version.ToBytes() - } - if err := buf.EncodeRawBytes(versionBytes); err != nil { - return err - } - return nil -} - -// Unmarshal deserializes a `KVRead` -func (r *KVRead) Unmarshal(buf *proto.Buffer) error { - var err error - var versionBytes []byte - if r.Key, err = buf.DecodeStringBytes(); err != nil { - return err - } - if versionBytes, err = buf.DecodeRawBytes(false); err != nil { - return err - } - if len(versionBytes) > 0 { - r.Version, _ = version.NewHeightFromBytes(versionBytes) - } - return nil -} - -// Marshal serializes a `RangeQueryInfo` -func (rqi *RangeQueryInfo) Marshal(buf *proto.Buffer) error { - if err := buf.EncodeStringBytes(rqi.StartKey); err != nil { - return err - } - if err := buf.EncodeStringBytes(rqi.EndKey); err != nil { - return err - } - - itrExhausedMarker := 0 // iterator did not get exhausted - if rqi.ItrExhausted { - itrExhausedMarker = 1 - } - if err := buf.EncodeVarint(uint64(itrExhausedMarker)); err != nil { - return err - } - - if err := buf.EncodeVarint(uint64(len(rqi.Results))); err != nil { - return err - } - for i := 0; i < len(rqi.Results); i++ { - if err := rqi.Results[i].Marshal(buf); err != nil { - return err - } - } - hashPresentMarker := 0 - if rqi.ResultHash != nil { - hashPresentMarker = 1 - } - if err := buf.EncodeVarint(uint64(hashPresentMarker)); err != nil { - return err - } - if rqi.ResultHash != nil { - if err := rqi.ResultHash.Marshal(buf); err != nil { - return err - } - } - return nil -} - -// Unmarshal deserializes a `RangeQueryInfo` -func (rqi *RangeQueryInfo) Unmarshal(buf *proto.Buffer) error { - var err error - var numResults uint64 - var itrExhaustedMarker uint64 - var hashPresentMarker uint64 - - if rqi.StartKey, err = buf.DecodeStringBytes(); err != nil { - return err - } - if rqi.EndKey, err = buf.DecodeStringBytes(); err != nil { - return err - } - if itrExhaustedMarker, err = buf.DecodeVarint(); err != nil { - return err - } - if itrExhaustedMarker == 1 { - rqi.ItrExhausted = true - } else { - rqi.ItrExhausted = false - } - if numResults, err = buf.DecodeVarint(); err != nil { - return err - } - if numResults > 0 { - rqi.Results = make([]*KVRead, int(numResults)) - } - for i := 0; i < int(numResults); i++ { - kvRead := &KVRead{} - if err := kvRead.Unmarshal(buf); err != nil { - return err - } - rqi.Results[i] = kvRead 
- } - if hashPresentMarker, err = buf.DecodeVarint(); err != nil { - return err - } - if hashPresentMarker == 0 { - return nil - } - resultHash := &MerkleSummary{} - if err := resultHash.Unmarshal(buf); err != nil { - return err - } - rqi.ResultHash = resultHash - return nil -} - -// Marshal serializes a `QueryResultHash` -func (ms *MerkleSummary) Marshal(buf *proto.Buffer) error { - if err := buf.EncodeVarint(uint64(ms.MaxDegree)); err != nil { - return err - } - if err := buf.EncodeVarint(uint64(ms.MaxLevel)); err != nil { - return err - } - if err := buf.EncodeVarint(uint64(len(ms.MaxLevelHashes))); err != nil { - return err - } - for i := 0; i < len(ms.MaxLevelHashes); i++ { - if err := buf.EncodeRawBytes(ms.MaxLevelHashes[i]); err != nil { - return err - } - } - return nil -} - -// Unmarshal deserializes a `QueryResultHash` -func (ms *MerkleSummary) Unmarshal(buf *proto.Buffer) error { - var err error - var maxDegree uint64 - var level uint64 - var numHashes uint64 - var hash []byte - - if maxDegree, err = buf.DecodeVarint(); err != nil { - return err - } - if level, err = buf.DecodeVarint(); err != nil { - return err - } - if numHashes, err = buf.DecodeVarint(); err != nil { - return err - } - ms.MaxDegree = int(maxDegree) - ms.MaxLevel = MerkleTreeLevel(int(level)) - for i := 0; i < int(numHashes); i++ { - if hash, err = buf.DecodeRawBytes(false); err != nil { - return err - } - ms.MaxLevelHashes = append(ms.MaxLevelHashes, hash) - } - return nil -} - -// Marshal serializes a `KVWrite` -func (w *KVWrite) Marshal(buf *proto.Buffer) error { - var err error - if err = buf.EncodeStringBytes(w.Key); err != nil { - return err - } - deleteMarker := 0 - if w.IsDelete { - deleteMarker = 1 - } - if err = buf.EncodeVarint(uint64(deleteMarker)); err != nil { - return err - } - if deleteMarker == 0 { - if err = buf.EncodeRawBytes(w.Value); err != nil { - return err - } - } - return nil -} - -// Unmarshal deserializes a `KVWrite` -func (w *KVWrite) Unmarshal(buf *proto.Buffer) error { - var err error - if w.Key, err = buf.DecodeStringBytes(); err != nil { - return err - } - var deleteMarker uint64 - if deleteMarker, err = buf.DecodeVarint(); err != nil { - return err - } - if deleteMarker == 1 { - w.IsDelete = true - return nil - } - if w.Value, err = buf.DecodeRawBytes(false); err != nil { - return err - } - return nil -} - -// Marshal serializes a `NsReadWriteSet` -func (nsRW *NsReadWriteSet) Marshal(buf *proto.Buffer) error { - var err error - if err = buf.EncodeStringBytes(nsRW.NameSpace); err != nil { - return err - } - if err = buf.EncodeVarint(uint64(len(nsRW.Reads))); err != nil { - return err - } - for i := 0; i < len(nsRW.Reads); i++ { - if err = nsRW.Reads[i].Marshal(buf); err != nil { - return err - } - } - if err = buf.EncodeVarint(uint64(len(nsRW.Writes))); err != nil { - return err - } - for i := 0; i < len(nsRW.Writes); i++ { - if err = nsRW.Writes[i].Marshal(buf); err != nil { - return err - } - } - if err = buf.EncodeVarint(uint64(len(nsRW.RangeQueriesInfo))); err != nil { - return err - } - for i := 0; i < len(nsRW.RangeQueriesInfo); i++ { - if err = nsRW.RangeQueriesInfo[i].Marshal(buf); err != nil { - return err - } - } - return nil -} - -// Unmarshal deserializes a `NsReadWriteSet` -func (nsRW *NsReadWriteSet) Unmarshal(buf *proto.Buffer) error { - var err error - if nsRW.NameSpace, err = buf.DecodeStringBytes(); err != nil { - return err - } - var numReads uint64 - if numReads, err = buf.DecodeVarint(); err != nil { - return err - } - for i := 0; i < int(numReads); i++ { - 
r := &KVRead{} - if err = r.Unmarshal(buf); err != nil { - return err - } - nsRW.Reads = append(nsRW.Reads, r) - } - - var numWrites uint64 - if numWrites, err = buf.DecodeVarint(); err != nil { - return err - } - for i := 0; i < int(numWrites); i++ { - w := &KVWrite{} - if err = w.Unmarshal(buf); err != nil { - return err - } - nsRW.Writes = append(nsRW.Writes, w) - } - - var numRangeQueriesInfo uint64 - if numRangeQueriesInfo, err = buf.DecodeVarint(); err != nil { - return err - } - for i := 0; i < int(numRangeQueriesInfo); i++ { - rqInfo := &RangeQueryInfo{} - if err = rqInfo.Unmarshal(buf); err != nil { - return err - } - nsRW.RangeQueriesInfo = append(nsRW.RangeQueriesInfo, rqInfo) - } - return nil -} - -// Marshal serializes a `TxReadWriteSet` -func (txRW *TxReadWriteSet) Marshal() ([]byte, error) { - buf := proto.NewBuffer(nil) - var err error - if err = buf.EncodeVarint(uint64(len(txRW.NsRWs))); err != nil { - return nil, err - } - for i := 0; i < len(txRW.NsRWs); i++ { - if err = txRW.NsRWs[i].Marshal(buf); err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} - -// Unmarshal deserializes a `TxReadWriteSet` -func (txRW *TxReadWriteSet) Unmarshal(b []byte) error { - buf := proto.NewBuffer(b) - var err error - var numEntries uint64 - if numEntries, err = buf.DecodeVarint(); err != nil { - return err - } - for i := 0; i < int(numEntries); i++ { - nsRW := &NsReadWriteSet{} - if err = nsRW.Unmarshal(buf); err != nil { - return err - } - txRW.NsRWs = append(txRW.NsRWs, nsRW) - } - return nil -} - -// String prints a `KVRead` -func (r *KVRead) String() string { - return fmt.Sprintf("%s:%d", r.Key, r.Version) -} - -// String prints a `KVWrite` -func (w *KVWrite) String() string { - return fmt.Sprintf("%s=[%#v]", w.Key, w.Value) -} - -// String prints a range query info -func (rqi *RangeQueryInfo) String() string { - return fmt.Sprintf("StartKey=%s, EndKey=%s, ItrExhausted=%t, Results=%#v, Hash=%#v", - rqi.StartKey, rqi.EndKey, rqi.ItrExhausted, rqi.Results, rqi.ResultHash) -} - -// String prints a `NsReadWriteSet` -func (nsRW *NsReadWriteSet) String() string { - var buffer bytes.Buffer - buffer.WriteString("ReadSet=\n") - for _, r := range nsRW.Reads { - buffer.WriteString("\t") - buffer.WriteString(r.String()) - buffer.WriteString("\n") - } - buffer.WriteString("WriteSet=\n") - for _, w := range nsRW.Writes { - buffer.WriteString("\t") - buffer.WriteString(w.String()) - buffer.WriteString("\n") - } - buffer.WriteString("RangeQueriesInfo=\n") - for _, rqi := range nsRW.RangeQueriesInfo { - buffer.WriteString("\t") - buffer.WriteString(rqi.String()) - buffer.WriteString("\n") - } - return buffer.String() -} - -// String prints a `TxReadWriteSet` -func (txRW *TxReadWriteSet) String() string { - var buffer bytes.Buffer - for _, nsRWSet := range txRW.NsRWs { - buffer.WriteString(nsRWSet.NameSpace) - buffer.WriteString("::") - buffer.WriteString(nsRWSet.String()) - } - return buffer.String() -} diff --git a/core/ledger/kvledger/txmgmt/rwset/rwset_holder_test.go b/core/ledger/kvledger/txmgmt/rwset/rwset_holder_test.go deleted file mode 100644 index 8f6c787751a..00000000000 --- a/core/ledger/kvledger/txmgmt/rwset/rwset_holder_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rwset - -import ( - "testing" - - "github.com/hyperledger/fabric/common/ledger/testutil" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" -) - -func TestRWSetHolder(t *testing.T) { - rwSet := NewRWSet() - - rwSet.AddToReadSet("ns1", "key2", version.NewHeight(1, 2)) - rwSet.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) - rwSet.AddToWriteSet("ns1", "key2", []byte("value2")) - - rqi1 := &RangeQueryInfo{"bKey", "", false, nil, nil} - rqi1.EndKey = "eKey" - rqi1.Results = []*KVRead{NewKVRead("bKey1", version.NewHeight(2, 3)), NewKVRead("bKey2", version.NewHeight(2, 4))} - rqi1.ItrExhausted = true - rwSet.AddToRangeQuerySet("ns1", rqi1) - - rqi2 := &RangeQueryInfo{"bKey", "", false, nil, nil} - rqi2.EndKey = "eKey" - rqi2.Results = []*KVRead{NewKVRead("bKey1", version.NewHeight(2, 3)), NewKVRead("bKey2", version.NewHeight(2, 4))} - rqi2.ItrExhausted = true - rwSet.AddToRangeQuerySet("ns1", rqi2) - - rqi3 := &RangeQueryInfo{"bKey", "", true, nil, nil} - rwSet.AddToRangeQuerySet("ns1", rqi3) - rqi3.EndKey = "eKey1" - rqi3.Results = []*KVRead{NewKVRead("bKey1", version.NewHeight(2, 3)), NewKVRead("bKey2", version.NewHeight(2, 4))} - - rwSet.AddToReadSet("ns2", "key2", version.NewHeight(1, 2)) - rwSet.AddToWriteSet("ns2", "key3", []byte("value3")) - - txRWSet := rwSet.GetTxReadWriteSet() - - ns1RWSet := &NsReadWriteSet{"ns1", - []*KVRead{&KVRead{"key1", version.NewHeight(1, 1)}, &KVRead{"key2", version.NewHeight(1, 2)}}, - []*KVWrite{&KVWrite{"key2", false, []byte("value2")}}, - []*RangeQueryInfo{rqi1, rqi3}} - - ns2RWSet := &NsReadWriteSet{"ns2", - []*KVRead{&KVRead{"key2", version.NewHeight(1, 2)}}, - []*KVWrite{&KVWrite{"key3", false, []byte("value3")}}, - []*RangeQueryInfo{}} - - expectedTxRWSet := &TxReadWriteSet{[]*NsReadWriteSet{ns1RWSet, ns2RWSet}} - t.Logf("Actual=%s\n Expected=%s", txRWSet, expectedTxRWSet) - testutil.AssertEquals(t, txRWSet, expectedTxRWSet) -} diff --git a/core/ledger/kvledger/txmgmt/rwset/rwset_test.go b/core/ledger/kvledger/txmgmt/rwset/rwset_test.go deleted file mode 100644 index 792184f6533..00000000000 --- a/core/ledger/kvledger/txmgmt/rwset/rwset_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package rwset - -import ( - "testing" - - "github.com/hyperledger/fabric/common/ledger/testutil" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" -) - -func TestNilTxRWSet(t *testing.T) { - txRW := &TxReadWriteSet{} - nsRW1 := &NsReadWriteSet{"ns1", - []*KVRead{&KVRead{"key1", nil}}, - []*KVWrite{&KVWrite{"key1", false, []byte("value1")}}, - nil} - txRW.NsRWs = append(txRW.NsRWs, nsRW1) - b, err := txRW.Marshal() - testutil.AssertNoError(t, err, "Error while marshalling changeset") - - deserializedRWSet := &TxReadWriteSet{} - err = deserializedRWSet.Unmarshal(b) - testutil.AssertNoError(t, err, "Error while unmarshalling changeset") - t.Logf("Unmarshalled changeset = %#+v", deserializedRWSet.NsRWs[0].Writes[0].IsDelete) - testutil.AssertEquals(t, deserializedRWSet, txRW) -} - -func TestTxRWSetMarshalUnmarshal(t *testing.T) { - txRW := &TxReadWriteSet{} - nsRW1 := &NsReadWriteSet{"ns1", - []*KVRead{&KVRead{"key1", version.NewHeight(1, 1)}}, - []*KVWrite{&KVWrite{"key2", false, []byte("value2")}}, - nil} - - nsRW2 := &NsReadWriteSet{"ns2", - []*KVRead{&KVRead{"key3", version.NewHeight(1, 2)}}, - []*KVWrite{&KVWrite{"key4", true, nil}}, - nil} - - nsRW3 := &NsReadWriteSet{"ns3", - []*KVRead{&KVRead{"key5", version.NewHeight(1, 3)}}, - []*KVWrite{&KVWrite{"key6", false, []byte("value6")}, &KVWrite{"key7", false, []byte("value7")}}, - nil} - - nsRW4 := &NsReadWriteSet{"ns4", - []*KVRead{&KVRead{"key8", version.NewHeight(1, 3)}}, - []*KVWrite{&KVWrite{"key9", false, []byte("value9")}, &KVWrite{"key10", false, []byte("value10")}}, - []*RangeQueryInfo{&RangeQueryInfo{"startKey1", "endKey1", true, nil, - &MerkleSummary{20, 1, []Hash{testutil.ConstructRandomBytes(t, 10)}}}}} - - nsRW5 := &NsReadWriteSet{"ns5", - nil, - nil, - []*RangeQueryInfo{&RangeQueryInfo{"startKey2", "endKey2", false, []*KVRead{&KVRead{"key11", version.NewHeight(1, 3)}}, nil}}} - - nsRW6 := &NsReadWriteSet{"ns6", - nil, - nil, - []*RangeQueryInfo{ - &RangeQueryInfo{"startKey2", "endKey2", false, []*KVRead{&KVRead{"key11", version.NewHeight(1, 3)}}, nil}, - &RangeQueryInfo{"startKey3", "endKey3", true, []*KVRead{&KVRead{"key12", version.NewHeight(2, 4)}}, nil}}} - - txRW.NsRWs = append(txRW.NsRWs, nsRW1, nsRW2, nsRW3, nsRW4, nsRW5, nsRW6) - t.Logf("Testing txRWSet = %s", txRW) - b, err := txRW.Marshal() - testutil.AssertNoError(t, err, "Error while marshalling changeset") - - deserializedRWSet := &TxReadWriteSet{} - err = deserializedRWSet.Unmarshal(b) - testutil.AssertNoError(t, err, "Error while unmarshalling changeset") - testutil.AssertEquals(t, deserializedRWSet, txRW) -} diff --git a/core/ledger/kvledger/txmgmt/rwset/query_results_helper.go b/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper.go similarity index 84% rename from core/ledger/kvledger/txmgmt/rwset/query_results_helper.go rename to core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper.go index 5944c410d21..f63a7234fd6 100644 --- a/core/ledger/kvledger/txmgmt/rwset/query_results_helper.go +++ b/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rwset +package rwsetutil import ( "fmt" @@ -22,8 +22,15 @@ import ( "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric/bccsp" bccspfactory "github.com/hyperledger/fabric/bccsp/factory" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" ) +// MerkleTreeLevel used for representing a level of the merkle tree +type MerkleTreeLevel uint32 + +// Hash represents bytes of a hash +type Hash []byte + const ( leafLevel = MerkleTreeLevel(1) ) @@ -53,14 +60,14 @@ var ( // `AddResult` function should be invoke to supply the next result and at the end `Done` function should be invoked. // The `Done` function does the final processing and returns the final output type RangeQueryResultsHelper struct { - pendingResults []*KVRead + pendingResults []*kvrwset.KVRead hashingEnabled bool - maxDegree int + maxDegree uint32 mt *merkleTree } // NewRangeQueryResultsHelper constructs a RangeQueryResultsHelper -func NewRangeQueryResultsHelper(enableHashing bool, maxDegree int) (*RangeQueryResultsHelper, error) { +func NewRangeQueryResultsHelper(enableHashing bool, maxDegree uint32) (*RangeQueryResultsHelper, error) { helper := &RangeQueryResultsHelper{nil, enableHashing, maxDegree, nil} if enableHashing { var err error @@ -74,10 +81,10 @@ func NewRangeQueryResultsHelper(enableHashing bool, maxDegree int) (*RangeQueryR // AddResult adds a new query result for processing. // Put the result into the list of pending results. If the number of pending results exceeds `maxDegree`, // consume the results for incrementally update the merkle tree -func (helper *RangeQueryResultsHelper) AddResult(kvRead *KVRead) error { +func (helper *RangeQueryResultsHelper) AddResult(kvRead *kvrwset.KVRead) error { logger.Debug("Adding a result") helper.pendingResults = append(helper.pendingResults, kvRead) - if helper.hashingEnabled && len(helper.pendingResults) > helper.maxDegree { + if helper.hashingEnabled && uint32(len(helper.pendingResults)) > helper.maxDegree { logger.Debug("Processing the accumulated results") if err := helper.processPendingResults(); err != nil { return err @@ -91,7 +98,7 @@ func (helper *RangeQueryResultsHelper) AddResult(kvRead *KVRead) error { // Only one of these two will be non-nil (except when no results are ever added). // `MerkleSummary` will be nil if and only if either `enableHashing` is set to false // or the number of total results are less than `maxDegree` -func (helper *RangeQueryResultsHelper) Done() ([]*KVRead, *MerkleSummary, error) { +func (helper *RangeQueryResultsHelper) Done() ([]*kvrwset.KVRead, *kvrwset.QueryReadsMerkleSummary, error) { // The merkle tree will be empty if total results are less than or equals to 'maxDegree' // i.e., not even once the results were processed for hashing if !helper.hashingEnabled || helper.mt.isEmpty() { @@ -111,7 +118,7 @@ func (helper *RangeQueryResultsHelper) Done() ([]*KVRead, *MerkleSummary, error) // This intermediate state of the merkle tree helps during validation to detect a mismatch early on. // That helps by not requiring to build the complete merkle tree during validation // if there is a mismatch in early portion of the result-set. 
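// Usage sketch (illustrative, not part of the original change; assumes it lives alongside this
// package with kvrwset imported, and the function name is made up): results are fed in one at a
// time and Done() then yields either the raw reads or their merkle summary.
func exampleSummarizeRangeQuery(reads []*kvrwset.KVRead, maxDegree uint32) (*kvrwset.QueryReadsMerkleSummary, error) {
	helper, err := NewRangeQueryResultsHelper(true, maxDegree) // hashing enabled
	if err != nil {
		return nil, err
	}
	for _, r := range reads {
		// once more than maxDegree results are pending, AddResult folds them into the merkle tree
		if err := helper.AddResult(r); err != nil {
			return nil, err
		}
	}
	_, summary, err := helper.Done() // summary is nil when the raw reads are returned instead
	return summary, err
}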
-func (helper *RangeQueryResultsHelper) GetMerkleSummary() *MerkleSummary { +func (helper *RangeQueryResultsHelper) GetMerkleSummary() *kvrwset.QueryReadsMerkleSummary { if !helper.hashingEnabled { return nil } @@ -133,28 +140,19 @@ func (helper *RangeQueryResultsHelper) processPendingResults() error { return nil } -func serializeKVReads(kvReads []*KVRead) ([]byte, error) { - buf := proto.NewBuffer(nil) - if err := buf.EncodeVarint(uint64(len(kvReads))); err != nil { - return nil, err - } - for i := 0; i < len(kvReads); i++ { - if err := kvReads[i].Marshal(buf); err != nil { - return nil, err - } - } - return buf.Bytes(), nil +func serializeKVReads(kvReads []*kvrwset.KVRead) ([]byte, error) { + return proto.Marshal(&kvrwset.QueryReads{KvReads: kvReads}) } //////////// Merkle tree building code /////// type merkleTree struct { - maxDegree int + maxDegree uint32 tree map[MerkleTreeLevel][]Hash maxLevel MerkleTreeLevel } -func newMerkleTree(maxDegree int) (*merkleTree, error) { +func newMerkleTree(maxDegree uint32) (*merkleTree, error) { if maxDegree < 2 { return nil, fmt.Errorf("maxDegree [is %d] should not be less than 2 in the merkle tree", maxDegree) } @@ -171,7 +169,7 @@ func (m *merkleTree) update(nextLeafLevelHash Hash) error { currentLevel := leafLevel for { currentLevelHashes := m.tree[currentLevel] - if len(currentLevelHashes) <= m.maxDegree { + if uint32(len(currentLevelHashes)) <= m.maxDegree { return nil } nextLevelHash, err := computeCombinedHash(currentLevelHashes) @@ -216,7 +214,7 @@ func (m *merkleTree) done() error { } finalHashes := m.tree[m.maxLevel] - if len(finalHashes) > m.maxDegree { + if uint32(len(finalHashes)) > m.maxDegree { delete(m.tree, m.maxLevel) m.maxLevel++ combinedHash, err := computeCombinedHash(finalHashes) @@ -228,8 +226,10 @@ func (m *merkleTree) done() error { return nil } -func (m *merkleTree) getSummery() *MerkleSummary { - return &MerkleSummary{m.maxDegree, m.maxLevel, m.tree[m.maxLevel]} +func (m *merkleTree) getSummery() *kvrwset.QueryReadsMerkleSummary { + return &kvrwset.QueryReadsMerkleSummary{MaxDegree: m.maxDegree, + MaxLevel: uint32(m.maxLevel), + MaxLevelHashes: hashesToBytes(m.tree[m.maxLevel])} } func (m *merkleTree) getMaxLevel() MerkleTreeLevel { @@ -255,3 +255,11 @@ func computeCombinedHash(hashes []Hash) (Hash, error) { } return bccspfactory.GetDefault().Hash(combinedHash, hashOpts) } + +func hashesToBytes(hashes []Hash) [][]byte { + b := [][]byte{} + for _, hash := range hashes { + b = append(b, hash) + } + return b +} diff --git a/core/ledger/kvledger/txmgmt/rwset/query_results_helper_test.go b/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper_test.go similarity index 81% rename from core/ledger/kvledger/txmgmt/rwset/query_results_helper_test.go rename to core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper_test.go index 22e31a3b3eb..f0ede62d63d 100644 --- a/core/ledger/kvledger/txmgmt/rwset/query_results_helper_test.go +++ b/core/ledger/kvledger/txmgmt/rwsetutil/query_results_helper_test.go @@ -14,16 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rwset +package rwsetutil import ( "testing" "fmt" + "github.com/golang/protobuf/proto" bccspfactory "github.com/hyperledger/fabric/bccsp/factory" "github.com/hyperledger/fabric/common/ledger/testutil" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" ) func TestQueryResultHelper_NoResults(t *testing.T) { @@ -58,7 +60,10 @@ func TestQueryResultHelper_Hash_OneLevel(t *testing.T) { level1_2 := computeTestHashKVReads(t, kvReads[4:8]) level1_3 := computeTestHashKVReads(t, kvReads[8:]) testutil.AssertNil(t, r) - testutil.AssertEquals(t, h, &MerkleSummary{maxDegree, 1, []Hash{level1_1, level1_2, level1_3}}) + testutil.AssertEquals(t, h, &kvrwset.QueryReadsMerkleSummary{ + MaxDegree: uint32(maxDegree), + MaxLevel: 1, + MaxLevelHashes: hashesToBytes([]Hash{level1_1, level1_2, level1_3})}) } func TestQueryResultHelper_Hash_TwoLevel(t *testing.T) { @@ -76,7 +81,10 @@ func TestQueryResultHelper_Hash_TwoLevel(t *testing.T) { level2_1 := computeTestCombinedHash(t, level1_1, level1_2, level1_3, level1_4) level2_2 := computeTestCombinedHash(t, level1_5, level1_6, level1_7) testutil.AssertNil(t, r) - testutil.AssertEquals(t, h, &MerkleSummary{maxDegree, 2, []Hash{level2_1, level2_2}}) + testutil.AssertEquals(t, h, &kvrwset.QueryReadsMerkleSummary{ + MaxDegree: uint32(maxDegree), + MaxLevel: 2, + MaxLevelHashes: hashesToBytes([]Hash{level2_1, level2_2})}) } func TestQueryResultHelper_Hash_ThreeLevel(t *testing.T) { @@ -109,11 +117,14 @@ func TestQueryResultHelper_Hash_ThreeLevel(t *testing.T) { level3_1 := computeTestCombinedHash(t, level2_1, level2_2, level2_3, level2_4) level3_2 := level1_17 testutil.AssertNil(t, r) - testutil.AssertEquals(t, h, &MerkleSummary{maxDegree, 3, []Hash{level3_1, level3_2}}) + testutil.AssertEquals(t, h, &kvrwset.QueryReadsMerkleSummary{ + MaxDegree: uint32(maxDegree), + MaxLevel: 3, + MaxLevelHashes: hashesToBytes([]Hash{level3_1, level3_2})}) } -func buildTestResults(t *testing.T, enableHashing bool, maxDegree int, kvReads []*KVRead) ([]*KVRead, *MerkleSummary) { - helper, _ := NewRangeQueryResultsHelper(enableHashing, maxDegree) +func buildTestResults(t *testing.T, enableHashing bool, maxDegree int, kvReads []*kvrwset.KVRead) ([]*kvrwset.KVRead, *kvrwset.QueryReadsMerkleSummary) { + helper, _ := NewRangeQueryResultsHelper(enableHashing, uint32(maxDegree)) for _, kvRead := range kvReads { helper.AddResult(kvRead) } @@ -122,16 +133,18 @@ func buildTestResults(t *testing.T, enableHashing bool, maxDegree int, kvReads [ return r, h } -func buildTestKVReads(t *testing.T, num int) []*KVRead { - kvreads := []*KVRead{} +func buildTestKVReads(t *testing.T, num int) []*kvrwset.KVRead { + kvreads := []*kvrwset.KVRead{} for i := 0; i < num; i++ { kvreads = append(kvreads, NewKVRead(fmt.Sprintf("key_%d", i), version.NewHeight(1, uint64(i)))) } return kvreads } -func computeTestHashKVReads(t *testing.T, kvReads []*KVRead) Hash { - b, err := serializeKVReads(kvReads) +func computeTestHashKVReads(t *testing.T, kvReads []*kvrwset.KVRead) Hash { + queryReads := &kvrwset.QueryReads{} + queryReads.KvReads = kvReads + b, err := proto.Marshal(queryReads) testutil.AssertNoError(t, err, "") h, err := bccspfactory.GetDefault().Hash(b, hashOpts) testutil.AssertNoError(t, err, "") diff --git a/core/ledger/kvledger/txmgmt/rwset/rwset_holder.go b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder.go similarity index 63% rename from core/ledger/kvledger/txmgmt/rwset/rwset_holder.go rename to 
core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder.go index 1d0b20ba45d..2caf52eb46f 100644 --- a/core/ledger/kvledger/txmgmt/rwset/rwset_holder.go +++ b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder.go @@ -14,25 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rwset +package rwsetutil import ( "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" "github.com/hyperledger/fabric/core/ledger/util" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" logging "github.com/op/go-logging" ) var logger = logging.MustGetLogger("rwset") type nsRWs struct { - readMap map[string]*KVRead //for mvcc validation - writeMap map[string]*KVWrite - rangeQueriesMap map[rangeQueryKey]*RangeQueryInfo //for phantom read validation + readMap map[string]*kvrwset.KVRead //for mvcc validation + writeMap map[string]*kvrwset.KVWrite + rangeQueriesMap map[rangeQueryKey]*kvrwset.RangeQueryInfo //for phantom read validation rangeQueriesKeys []rangeQueryKey } func newNsRWs() *nsRWs { - return &nsRWs{make(map[string]*KVRead), make(map[string]*KVWrite), make(map[rangeQueryKey]*RangeQueryInfo), nil} + return &nsRWs{make(map[string]*kvrwset.KVRead), + make(map[string]*kvrwset.KVWrite), + make(map[rangeQueryKey]*kvrwset.RangeQueryInfo), nil} } type rangeQueryKey struct { @@ -41,30 +44,30 @@ type rangeQueryKey struct { itrExhausted bool } -// RWSet maintains the read-write set -type RWSet struct { +// RWSetBuilder helps building the read-write set +type RWSetBuilder struct { rwMap map[string]*nsRWs } -// NewRWSet constructs a new instance of RWSet -func NewRWSet() *RWSet { - return &RWSet{make(map[string]*nsRWs)} +// NewRWSetBuilder constructs a new instance of RWSetBuilder +func NewRWSetBuilder() *RWSetBuilder { + return &RWSetBuilder{make(map[string]*nsRWs)} } // AddToReadSet adds a key and corresponding version to the read-set -func (rws *RWSet) AddToReadSet(ns string, key string, version *version.Height) { +func (rws *RWSetBuilder) AddToReadSet(ns string, key string, version *version.Height) { nsRWs := rws.getOrCreateNsRW(ns) nsRWs.readMap[key] = NewKVRead(key, version) } // AddToWriteSet adds a key and value to the write-set -func (rws *RWSet) AddToWriteSet(ns string, key string, value []byte) { +func (rws *RWSetBuilder) AddToWriteSet(ns string, key string, value []byte) { nsRWs := rws.getOrCreateNsRW(ns) - nsRWs.writeMap[key] = NewKVWrite(key, value) + nsRWs.writeMap[key] = newKVWrite(key, value) } // AddToRangeQuerySet adds a range query info for performing phantom read validation -func (rws *RWSet) AddToRangeQuerySet(ns string, rqi *RangeQueryInfo) { +func (rws *RWSetBuilder) AddToRangeQuerySet(ns string, rqi *kvrwset.RangeQueryInfo) { nsRWs := rws.getOrCreateNsRW(ns) key := rangeQueryKey{rqi.StartKey, rqi.EndKey, rqi.ItrExhausted} _, ok := nsRWs.rangeQueriesMap[key] @@ -74,55 +77,42 @@ func (rws *RWSet) AddToRangeQuerySet(ns string, rqi *RangeQueryInfo) { } } -// GetFromWriteSet return the value of a key from the write-set -func (rws *RWSet) GetFromWriteSet(ns string, key string) ([]byte, bool) { - nsRWs, ok := rws.rwMap[ns] - if !ok { - return nil, false - } - var value []byte - kvWrite, ok := nsRWs.writeMap[key] - if ok && !kvWrite.IsDelete { - value = kvWrite.Value - } - return value, ok -} - // GetTxReadWriteSet returns the read-write set in the form that can be serialized -func (rws *RWSet) GetTxReadWriteSet() *TxReadWriteSet { - txRWSet := &TxReadWriteSet{} +func (rws *RWSetBuilder) GetTxReadWriteSet() *TxRwSet { 
+ txRWSet := &TxRwSet{} sortedNamespaces := util.GetSortedKeys(rws.rwMap) for _, ns := range sortedNamespaces { //Get namespace specific read-writes nsReadWriteMap := rws.rwMap[ns] //add read set - reads := []*KVRead{} + var reads []*kvrwset.KVRead sortedReadKeys := util.GetSortedKeys(nsReadWriteMap.readMap) for _, key := range sortedReadKeys { reads = append(reads, nsReadWriteMap.readMap[key]) } //add write set - writes := []*KVWrite{} + var writes []*kvrwset.KVWrite sortedWriteKeys := util.GetSortedKeys(nsReadWriteMap.writeMap) for _, key := range sortedWriteKeys { writes = append(writes, nsReadWriteMap.writeMap[key]) } //add range query info - rangeQueriesInfo := []*RangeQueryInfo{} + var rangeQueriesInfo []*kvrwset.RangeQueryInfo rangeQueriesMap := nsReadWriteMap.rangeQueriesMap for _, key := range nsReadWriteMap.rangeQueriesKeys { rangeQueriesInfo = append(rangeQueriesInfo, rangeQueriesMap[key]) } - nsRWs := &NsReadWriteSet{NameSpace: ns, Reads: reads, Writes: writes, RangeQueriesInfo: rangeQueriesInfo} - txRWSet.NsRWs = append(txRWSet.NsRWs, nsRWs) + kvRWs := &kvrwset.KVRWSet{Reads: reads, Writes: writes, RangeQueriesInfo: rangeQueriesInfo} + nsRWs := &NsRwSet{ns, kvRWs} + txRWSet.NsRwSets = append(txRWSet.NsRwSets, nsRWs) } return txRWSet } -func (rws *RWSet) getOrCreateNsRW(ns string) *nsRWs { +func (rws *RWSetBuilder) getOrCreateNsRW(ns string) *nsRWs { var nsRWs *nsRWs var ok bool if nsRWs, ok = rws.rwMap[ns]; !ok { diff --git a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder_test.go b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder_test.go new file mode 100644 index 00000000000..0ca61af1aa7 --- /dev/null +++ b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder_test.go @@ -0,0 +1,69 @@ +/* +Copyright IBM Corp. 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rwsetutil + +import ( + "testing" + + "github.com/hyperledger/fabric/common/ledger/testutil" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" +) + +func TestRWSetHolder(t *testing.T) { + rwSetBuilder := NewRWSetBuilder() + + rwSetBuilder.AddToReadSet("ns1", "key2", version.NewHeight(1, 2)) + rwSetBuilder.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) + rwSetBuilder.AddToWriteSet("ns1", "key2", []byte("value2")) + + rqi1 := &kvrwset.RangeQueryInfo{StartKey: "bKey", EndKey: "", ItrExhausted: false, ReadsInfo: nil} + rqi1.EndKey = "eKey" + rqi1.SetRawReads([]*kvrwset.KVRead{NewKVRead("bKey1", version.NewHeight(2, 3)), NewKVRead("bKey2", version.NewHeight(2, 4))}) + rqi1.ItrExhausted = true + rwSetBuilder.AddToRangeQuerySet("ns1", rqi1) + + rqi2 := &kvrwset.RangeQueryInfo{StartKey: "bKey", EndKey: "", ItrExhausted: false, ReadsInfo: nil} + rqi2.EndKey = "eKey" + rqi2.SetRawReads([]*kvrwset.KVRead{NewKVRead("bKey1", version.NewHeight(2, 3)), NewKVRead("bKey2", version.NewHeight(2, 4))}) + rqi2.ItrExhausted = true + rwSetBuilder.AddToRangeQuerySet("ns1", rqi2) + + rqi3 := &kvrwset.RangeQueryInfo{StartKey: "bKey", EndKey: "", ItrExhausted: true, ReadsInfo: nil} + rqi3.EndKey = "eKey1" + rqi3.SetRawReads([]*kvrwset.KVRead{NewKVRead("bKey1", version.NewHeight(2, 3)), NewKVRead("bKey2", version.NewHeight(2, 4))}) + rwSetBuilder.AddToRangeQuerySet("ns1", rqi3) + + rwSetBuilder.AddToReadSet("ns2", "key2", version.NewHeight(1, 2)) + rwSetBuilder.AddToWriteSet("ns2", "key3", []byte("value3")) + + txRWSet := rwSetBuilder.GetTxReadWriteSet() + + ns1RWSet := &NsRwSet{"ns1", &kvrwset.KVRWSet{ + Reads: []*kvrwset.KVRead{NewKVRead("key1", version.NewHeight(1, 1)), NewKVRead("key2", version.NewHeight(1, 2))}, + RangeQueriesInfo: []*kvrwset.RangeQueryInfo{rqi1, rqi3}, + Writes: []*kvrwset.KVWrite{newKVWrite("key2", []byte("value2"))}}} + + ns2RWSet := &NsRwSet{"ns2", &kvrwset.KVRWSet{ + Reads: []*kvrwset.KVRead{NewKVRead("key2", version.NewHeight(1, 2))}, + RangeQueriesInfo: nil, + Writes: []*kvrwset.KVWrite{newKVWrite("key3", []byte("value3"))}}} + + expectedTxRWSet := &TxRwSet{[]*NsRwSet{ns1RWSet, ns2RWSet}} + t.Logf("Actual=%s\n Expected=%s", txRWSet, expectedTxRWSet) + testutil.AssertEquals(t, txRWSet, expectedTxRWSet) +} diff --git a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util.go b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util.go new file mode 100644 index 00000000000..baad04231b7 --- /dev/null +++ b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util.go @@ -0,0 +1,103 @@ +/* +Copyright IBM Corp. 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rwsetutil + +import ( + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" + "github.com/hyperledger/fabric/protos/ledger/rwset" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" +) + +// TxRwSet acts as a proxy of 'rwset.TxReadWriteSet' proto message and helps constructing Read-write set specifically for KV data model +type TxRwSet struct { + NsRwSets []*NsRwSet +} + +// NsRwSet encapsulates 'kvrwset.KVRWSet' proto message for a specific name space (chaincode) +type NsRwSet struct { + NameSpace string + KvRwSet *kvrwset.KVRWSet +} + +// ToProtoBytes constructs TxReadWriteSet proto message and serializes using protobuf Marshal +func (txRwSet *TxRwSet) ToProtoBytes() ([]byte, error) { + protoTxRWSet := &rwset.TxReadWriteSet{} + protoTxRWSet.DataModel = rwset.TxReadWriteSet_KV + for _, nsRwSet := range txRwSet.NsRwSets { + protoNsRwSet := &rwset.NsReadWriteSet{} + protoNsRwSet.Namespace = nsRwSet.NameSpace + protoRwSetBytes, err := proto.Marshal(nsRwSet.KvRwSet) + if err != nil { + return nil, err + } + protoNsRwSet.Rwset = protoRwSetBytes + protoTxRWSet.NsRwset = append(protoTxRWSet.NsRwset, protoNsRwSet) + } + protoTxRwSetBytes, err := proto.Marshal(protoTxRWSet) + if err != nil { + return nil, err + } + return protoTxRwSetBytes, nil +} + +// FromProtoBytes deserializes protobytes into TxReadWriteSet proto message and populates 'TxRwSet' +func (txRwSet *TxRwSet) FromProtoBytes(protoBytes []byte) error { + protoTxRwSet := &rwset.TxReadWriteSet{} + if err := proto.Unmarshal(protoBytes, protoTxRwSet); err != nil { + return err + } + protoNsRwSets := protoTxRwSet.GetNsRwset() + var nsRwSet *NsRwSet + for _, protoNsRwSet := range protoNsRwSets { + nsRwSet = &NsRwSet{} + nsRwSet.NameSpace = protoNsRwSet.Namespace + protoRwSetBytes := protoNsRwSet.Rwset + + protoKvRwSet := &kvrwset.KVRWSet{} + if err := proto.Unmarshal(protoRwSetBytes, protoKvRwSet); err != nil { + return err + } + nsRwSet.KvRwSet = protoKvRwSet + txRwSet.NsRwSets = append(txRwSet.NsRwSets, nsRwSet) + } + return nil +} + +// NewKVRead helps constructing proto message kvrwset.KVRead +func NewKVRead(key string, version *version.Height) *kvrwset.KVRead { + return &kvrwset.KVRead{Key: key, Version: newProtoVersion(version)} +} + +// NewVersion helps converting proto message kvrwset.Version to version.Height +func NewVersion(protoVersion *kvrwset.Version) *version.Height { + if protoVersion == nil { + return nil + } + return version.NewHeight(protoVersion.BlockNum, protoVersion.TxNum) +} + +func newProtoVersion(height *version.Height) *kvrwset.Version { + if height == nil { + return nil + } + return &kvrwset.Version{BlockNum: height.BlockNum, TxNum: height.TxNum} +} + +func newKVWrite(key string, value []byte) *kvrwset.KVWrite { + return &kvrwset.KVWrite{Key: key, IsDelete: value == nil, Value: value} +} diff --git a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util_test.go b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util_test.go new file mode 100644 index 00000000000..9eaa4af48c5 --- /dev/null +++ b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util_test.go @@ -0,0 +1,65 @@ +/* +Copyright IBM Corp. 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rwsetutil + +import ( + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hyperledger/fabric/common/ledger/testutil" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" +) + +func TestTxRWSetMarshalUnmarshal(t *testing.T) { + txRwSet := &TxRwSet{} + + rqi1 := &kvrwset.RangeQueryInfo{StartKey: "k0", EndKey: "k9", ItrExhausted: true} + rqi1.SetRawReads([]*kvrwset.KVRead{ + &kvrwset.KVRead{Key: "k1", Version: &kvrwset.Version{BlockNum: 1, TxNum: 1}}, + &kvrwset.KVRead{Key: "k2", Version: &kvrwset.Version{BlockNum: 1, TxNum: 2}}, + }) + + rqi2 := &kvrwset.RangeQueryInfo{StartKey: "k00", EndKey: "k90", ItrExhausted: true} + rqi2.SetMerkelSummary(&kvrwset.QueryReadsMerkleSummary{MaxDegree: 5, MaxLevel: 4, MaxLevelHashes: [][]byte{[]byte("Hash-1"), []byte("Hash-2")}}) + + txRwSet.NsRwSets = []*NsRwSet{ + &NsRwSet{"ns1", &kvrwset.KVRWSet{ + []*kvrwset.KVRead{&kvrwset.KVRead{Key: "key1", Version: &kvrwset.Version{BlockNum: 1, TxNum: 1}}}, + []*kvrwset.RangeQueryInfo{rqi1}, + []*kvrwset.KVWrite{&kvrwset.KVWrite{Key: "key2", IsDelete: false, Value: []byte("value2")}}, + }}, + + &NsRwSet{"ns2", &kvrwset.KVRWSet{ + []*kvrwset.KVRead{&kvrwset.KVRead{Key: "key3", Version: &kvrwset.Version{BlockNum: 1, TxNum: 1}}}, + []*kvrwset.RangeQueryInfo{rqi2}, + []*kvrwset.KVWrite{&kvrwset.KVWrite{Key: "key3", IsDelete: false, Value: []byte("value3")}}, + }}, + + &NsRwSet{"ns3", &kvrwset.KVRWSet{ + []*kvrwset.KVRead{&kvrwset.KVRead{Key: "key4", Version: &kvrwset.Version{BlockNum: 1, TxNum: 1}}}, + nil, + []*kvrwset.KVWrite{&kvrwset.KVWrite{Key: "key4", IsDelete: false, Value: []byte("value4")}}, + }}, + } + + protoBytes, err := txRwSet.ToProtoBytes() + testutil.AssertNoError(t, err, "") + txRwSet1 := &TxRwSet{} + testutil.AssertNoError(t, txRwSet1.FromProtoBytes(protoBytes), "") + t.Logf("txRwSet=%s, txRwSet1=%s", spew.Sdump(txRwSet), spew.Sdump(txRwSet1)) + testutil.AssertEquals(t, txRwSet1, txRwSet) +} diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go index 8198e7f318d..3b9d59b670f 100644 --- a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go +++ b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/helper.go @@ -19,18 +19,19 @@ package lockbasedtxmgr import ( commonledger "github.com/hyperledger/fabric/common/ledger" "github.com/hyperledger/fabric/core/ledger" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwset" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" "github.com/hyperledger/fabric/core/ledger/ledgerconfig" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" ) type queryHelper struct { - txmgr *LockBasedTxMgr - rwset *rwset.RWSet - itrs []*resultsItr - err error - doneInvoked bool + txmgr *LockBasedTxMgr + rwsetBuilder *rwsetutil.RWSetBuilder + itrs []*resultsItr + err error + doneInvoked bool } func (h *queryHelper) getState(ns string, key string) ([]byte, error) { @@ -40,8 +41,8 @@ func (h *queryHelper) 
getState(ns string, key string) ([]byte, error) { return nil, err } val, ver := decomposeVersionedValue(versionedValue) - if h.rwset != nil { - h.rwset.AddToReadSet(ns, key, ver) + if h.rwsetBuilder != nil { + h.rwsetBuilder.AddToReadSet(ns, key, ver) } return val, nil } @@ -55,8 +56,8 @@ func (h *queryHelper) getStateMultipleKeys(namespace string, keys []string) ([][ values := make([][]byte, len(versionedValues)) for i, versionedValue := range versionedValues { val, ver := decomposeVersionedValue(versionedValue) - if h.rwset != nil { - h.rwset.AddToReadSet(namespace, keys[i], ver) + if h.rwsetBuilder != nil { + h.rwsetBuilder.AddToReadSet(namespace, keys[i], ver) } values[i] = val } @@ -65,7 +66,7 @@ func (h *queryHelper) getStateMultipleKeys(namespace string, keys []string) ([][ func (h *queryHelper) getStateRangeScanIterator(namespace string, startKey string, endKey string) (commonledger.ResultsIterator, error) { h.checkDone() - itr, err := newResultsItr(namespace, startKey, endKey, h.txmgr.db, h.rwset, + itr, err := newResultsItr(namespace, startKey, endKey, h.txmgr.db, h.rwsetBuilder, ledgerconfig.IsQueryReadsHashingEnabled(), ledgerconfig.GetMaxDegreeQueryReadsHashing()) if err != nil { return nil, err @@ -79,7 +80,7 @@ func (h *queryHelper) executeQuery(namespace, query string) (commonledger.Result if err != nil { return nil, err } - return &queryResultsItr{DBItr: dbItr, RWSet: h.rwset}, nil + return &queryResultsItr{DBItr: dbItr, RWSetBuilder: h.rwsetBuilder}, nil } func (h *queryHelper) done() { @@ -90,11 +91,14 @@ func (h *queryHelper) done() { h.doneInvoked = true for _, itr := range h.itrs { itr.Close() - if h.rwset != nil { + if h.rwsetBuilder != nil { results, hash, err := itr.rangeQueryResultsHelper.Done() - itr.rangeQueryInfo.Results = results - itr.rangeQueryInfo.ResultHash = hash - + if results != nil { + itr.rangeQueryInfo.SetRawReads(results) + } + if hash != nil { + itr.rangeQueryInfo.SetMerkelSummary(hash) + } // TODO Change the method signature of done() to return error. However, this will have // repercurssions in the chaincode package, so deferring to a separate changeset. // For now, capture the first error that is encountered @@ -102,7 +106,7 @@ func (h *queryHelper) done() { if h.err == nil { h.err = err } - h.rwset.AddToRangeQuerySet(itr.ns, itr.rangeQueryInfo) + h.rwsetBuilder.AddToRangeQuerySet(itr.ns, itr.rangeQueryInfo) } } } @@ -121,25 +125,25 @@ type resultsItr struct { ns string endKey string dbItr statedb.ResultsIterator - rwSet *rwset.RWSet - rangeQueryInfo *rwset.RangeQueryInfo - rangeQueryResultsHelper *rwset.RangeQueryResultsHelper + rwSetBuilder *rwsetutil.RWSetBuilder + rangeQueryInfo *kvrwset.RangeQueryInfo + rangeQueryResultsHelper *rwsetutil.RangeQueryResultsHelper } func newResultsItr(ns string, startKey string, endKey string, - db statedb.VersionedDB, rwSet *rwset.RWSet, enableHashing bool, maxDegree int) (*resultsItr, error) { + db statedb.VersionedDB, rwsetBuilder *rwsetutil.RWSetBuilder, enableHashing bool, maxDegree uint32) (*resultsItr, error) { dbItr, err := db.GetStateRangeScanIterator(ns, startKey, endKey) if err != nil { return nil, err } itr := &resultsItr{ns: ns, dbItr: dbItr} // it's a simulation request so, enable capture of range query info - if rwSet != nil { - itr.rwSet = rwSet + if rwsetBuilder != nil { + itr.rwSetBuilder = rwsetBuilder itr.endKey = endKey // just set the StartKey... set the EndKey later below in the Next() method. 
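// Sketch (illustrative, not part of the original files; the function name is made up): with the
// proto-backed type, query results are attached to a kvrwset.RangeQueryInfo through its helper
// setters rather than by assigning struct fields, mirroring what done() above now does.
func exampleBuildRangeQueryInfo(startKey, endKey string, reads []*kvrwset.KVRead, summary *kvrwset.QueryReadsMerkleSummary) *kvrwset.RangeQueryInfo {
	rqi := &kvrwset.RangeQueryInfo{StartKey: startKey, EndKey: endKey}
	if summary != nil {
		rqi.SetMerkelSummary(summary) // hashed form, produced when hashing is enabled and results exceed maxDegree
	} else {
		rqi.SetRawReads(reads) // raw form, used for small result sets
	}
	return rqi
}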
- itr.rangeQueryInfo = &rwset.RangeQueryInfo{StartKey: startKey} - resultsHelper, err := rwset.NewRangeQueryResultsHelper(enableHashing, maxDegree) + itr.rangeQueryInfo = &kvrwset.RangeQueryInfo{StartKey: startKey} + resultsHelper, err := rwsetutil.NewRangeQueryResultsHelper(enableHashing, maxDegree) if err != nil { return nil, err } @@ -174,7 +178,7 @@ func (itr *resultsItr) Next() (commonledger.QueryResult, error) { // or b) the last key that was supplied in the original query (if the iterator is exhausted) // 2) The ItrExhausted - set to true if the iterator is going to return nil as a result of the Next() call func (itr *resultsItr) updateRangeQueryInfo(queryResult statedb.QueryResult) { - if itr.rwSet == nil { + if itr.rwSetBuilder == nil { return } @@ -186,7 +190,7 @@ func (itr *resultsItr) updateRangeQueryInfo(queryResult statedb.QueryResult) { return } versionedKV := queryResult.(*statedb.VersionedKV) - itr.rangeQueryResultsHelper.AddResult(rwset.NewKVRead(versionedKV.Key, versionedKV.Version)) + itr.rangeQueryResultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version)) // Set the end key to the latest key retrieved by the caller. // Because, the caller may actually not invoke the Next() function again itr.rangeQueryInfo.EndKey = versionedKV.Key @@ -198,8 +202,8 @@ func (itr *resultsItr) Close() { } type queryResultsItr struct { - DBItr statedb.ResultsIterator - RWSet *rwset.RWSet + DBItr statedb.ResultsIterator + RWSetBuilder *rwsetutil.RWSetBuilder } // Next implements method in interface ledger.ResultsIterator @@ -215,8 +219,8 @@ func (itr *queryResultsItr) Next() (commonledger.QueryResult, error) { versionedQueryRecord := queryResult.(*statedb.VersionedQueryRecord) logger.Debugf("queryResultsItr.Next() returned a record:%s", string(versionedQueryRecord.Record)) - if itr.RWSet != nil { - itr.RWSet.AddToReadSet(versionedQueryRecord.Namespace, versionedQueryRecord.Key, versionedQueryRecord.Version) + if itr.RWSetBuilder != nil { + itr.RWSetBuilder.AddToReadSet(versionedQueryRecord.Namespace, versionedQueryRecord.Key, versionedQueryRecord.Version) } return &ledger.QueryRecord{Namespace: versionedQueryRecord.Namespace, Key: versionedQueryRecord.Key, Record: versionedQueryRecord.Record}, nil } diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_query_executer.go b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_query_executer.go index 38e6aa314cc..02a79ae20c7 100644 --- a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_query_executer.go +++ b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_query_executer.go @@ -28,7 +28,7 @@ type lockBasedQueryExecutor struct { } func newQueryExecutor(txmgr *LockBasedTxMgr) *lockBasedQueryExecutor { - helper := &queryHelper{txmgr: txmgr, rwset: nil} + helper := &queryHelper{txmgr: txmgr, rwsetBuilder: nil} id := util.GenerateUUID() logger.Debugf("constructing new query executor [%s]", id) return &lockBasedQueryExecutor{helper, id} diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_tx_simulator.go b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_tx_simulator.go index 1190e5bea00..3f8d4458b1b 100644 --- a/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_tx_simulator.go +++ b/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_tx_simulator.go @@ -20,21 +20,21 @@ import ( "errors" "github.com/hyperledger/fabric/common/util" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwset" + 
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil" ) // LockBasedTxSimulator is a transaction simulator used in `LockBasedTxMgr` type lockBasedTxSimulator struct { lockBasedQueryExecutor - rwset *rwset.RWSet + rwsetBuilder *rwsetutil.RWSetBuilder } func newLockBasedTxSimulator(txmgr *LockBasedTxMgr) *lockBasedTxSimulator { - rwset := rwset.NewRWSet() - helper := &queryHelper{txmgr: txmgr, rwset: rwset} + rwsetBuilder := rwsetutil.NewRWSetBuilder() + helper := &queryHelper{txmgr: txmgr, rwsetBuilder: rwsetBuilder} id := util.GenerateUUID() logger.Debugf("constructing new tx simulator [%s]", id) - return &lockBasedTxSimulator{lockBasedQueryExecutor{helper, id}, rwset} + return &lockBasedTxSimulator{lockBasedQueryExecutor{helper, id}, rwsetBuilder} } // GetState implements method in interface `ledger.TxSimulator` @@ -45,7 +45,7 @@ func (s *lockBasedTxSimulator) GetState(ns string, key string) ([]byte, error) { // SetState implements method in interface `ledger.TxSimulator` func (s *lockBasedTxSimulator) SetState(ns string, key string, value []byte) error { s.helper.checkDone() - s.rwset.AddToWriteSet(ns, key, value) + s.rwsetBuilder.AddToWriteSet(ns, key, value) return nil } @@ -71,7 +71,7 @@ func (s *lockBasedTxSimulator) GetTxSimulationResults() ([]byte, error) { if s.helper.err != nil { return nil, s.helper.err } - return s.rwset.GetTxReadWriteSet().Marshal() + return s.rwsetBuilder.GetTxReadWriteSet().ToProtoBytes() } // ExecuteUpdate implements method in interface `ledger.TxSimulator` diff --git a/core/ledger/kvledger/txmgmt/validator/statebasedval/range_query_validator.go b/core/ledger/kvledger/txmgmt/validator/statebasedval/range_query_validator.go index cd936d8fa4d..54743227401 100644 --- a/core/ledger/kvledger/txmgmt/validator/statebasedval/range_query_validator.go +++ b/core/ledger/kvledger/txmgmt/validator/statebasedval/range_query_validator.go @@ -19,22 +19,23 @@ package statebasedval import ( "bytes" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwset" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" ) type rangeQueryValidator interface { - init(rqInfo *rwset.RangeQueryInfo, itr statedb.ResultsIterator) error + init(rqInfo *kvrwset.RangeQueryInfo, itr statedb.ResultsIterator) error validate() (bool, error) } type rangeQueryResultsValidator struct { - rqInfo *rwset.RangeQueryInfo + rqInfo *kvrwset.RangeQueryInfo itr statedb.ResultsIterator } -func (v *rangeQueryResultsValidator) init(rqInfo *rwset.RangeQueryInfo, itr statedb.ResultsIterator) error { +func (v *rangeQueryResultsValidator) init(rqInfo *kvrwset.RangeQueryInfo, itr statedb.ResultsIterator) error { v.rqInfo = rqInfo v.itr = itr return nil @@ -44,7 +45,7 @@ func (v *rangeQueryResultsValidator) init(rqInfo *rwset.RangeQueryInfo, itr stat // and the iterator (latest view of the `committed state` i.e., db + updates). At first mismatch between the results // from the two sources (the range-query-info and the iterator), the validate retruns false. 
func (v *rangeQueryResultsValidator) validate() (bool, error) { - rqResults := v.rqInfo.Results + rqResults := v.rqInfo.GetRawReads().GetKvReads() itr := v.itr var result statedb.QueryResult var err error @@ -66,7 +67,7 @@ func (v *rangeQueryResultsValidator) validate() (bool, error) { logger.Debugf("key name mismatch: Key in rwset = [%s], key in query results = [%s]", kvRead.Key, versionedKV.Key) return false, nil } - if !version.AreSame(versionedKV.Version, kvRead.Version) { + if !version.AreSame(versionedKV.Version, convertToVersionHeight(kvRead.Version)) { logger.Debugf(`Version mismatch for key [%s]: Version in rwset = [%#v], latest version = [%#v]`, versionedKV.Key, versionedKV.Version, kvRead.Version) return false, nil @@ -87,16 +88,16 @@ func (v *rangeQueryResultsValidator) validate() (bool, error) { } type rangeQueryHashValidator struct { - rqInfo *rwset.RangeQueryInfo + rqInfo *kvrwset.RangeQueryInfo itr statedb.ResultsIterator - resultsHelper *rwset.RangeQueryResultsHelper + resultsHelper *rwsetutil.RangeQueryResultsHelper } -func (v *rangeQueryHashValidator) init(rqInfo *rwset.RangeQueryInfo, itr statedb.ResultsIterator) error { +func (v *rangeQueryHashValidator) init(rqInfo *kvrwset.RangeQueryInfo, itr statedb.ResultsIterator) error { v.rqInfo = rqInfo v.itr = itr var err error - v.resultsHelper, err = rwset.NewRangeQueryResultsHelper(true, rqInfo.ResultHash.MaxDegree) + v.resultsHelper, err = rwsetutil.NewRangeQueryResultsHelper(true, rqInfo.GetReadsMerkleHashes().MaxDegree) return err } @@ -111,8 +112,8 @@ func (v *rangeQueryHashValidator) init(rqInfo *rwset.RangeQueryInfo, itr statedb func (v *rangeQueryHashValidator) validate() (bool, error) { itr := v.itr lastMatchedIndex := -1 - inMerkle := v.rqInfo.ResultHash - var merkle *rwset.MerkleSummary + inMerkle := v.rqInfo.GetReadsMerkleHashes() + var merkle *kvrwset.QueryReadsMerkleSummary logger.Debugf("inMerkle: %#v", inMerkle) for { var result statedb.QueryResult @@ -130,7 +131,7 @@ func (v *rangeQueryHashValidator) validate() (bool, error) { return equals, nil } versionedKV := result.(*statedb.VersionedKV) - v.resultsHelper.AddResult(rwset.NewKVRead(versionedKV.Key, versionedKV.Version)) + v.resultsHelper.AddResult(rwsetutil.NewKVRead(versionedKV.Key, versionedKV.Version)) merkle := v.resultsHelper.GetMerkleSummary() if merkle.MaxLevel < inMerkle.MaxLevel { @@ -153,3 +154,7 @@ func (v *rangeQueryHashValidator) validate() (bool, error) { } } } + +func convertToVersionHeight(v *kvrwset.Version) *version.Height { + return version.NewHeight(v.BlockNum, v.TxNum) +} diff --git a/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator.go b/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator.go index 895badd437a..318022cd59d 100644 --- a/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator.go +++ b/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator.go @@ -17,11 +17,12 @@ limitations under the License. 
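An aside on the range_query_validator.go hunks above (the state_based_validator.go diff resumes right after this note): which of the two validators runs for a given range query is decided entirely by which member of the new kvrwset.RangeQueryInfo oneof the simulator populated. A Merkle summary selects rangeQueryHashValidator, which re-hashes the freshly queried results through rwsetutil.NewRangeQueryResultsHelper and compares summaries level by level, while raw reads select rangeQueryResultsValidator, which compares the recorded KVReads key by key, converting each kvrwset.Version back to a version.Height via the new convertToVersionHeight helper. The standalone Go sketch below illustrates that dispatch; it assumes only the generated kvrwset package plus the SetRawReads/SetMerkelSummary helpers added in protos/ledger/rwset/kvrwset/helper.go later in this patch, and it is not code from the patch itself.

package main

import (
	"fmt"

	"github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset"
)

func main() {
	// A range query whose raw results were small enough to record verbatim.
	rawRQI := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: true}
	rawRQI.SetRawReads([]*kvrwset.KVRead{
		{Key: "key2", Version: &kvrwset.Version{BlockNum: 1, TxNum: 2}},
		{Key: "key3", Version: &kvrwset.Version{BlockNum: 1, TxNum: 3}},
	})

	// A range query whose results were recorded only as a Merkle summary during
	// simulation (the summary values here are illustrative, not computed from real reads).
	hashedRQI := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key9", ItrExhausted: true}
	hashedRQI.SetMerkelSummary(&kvrwset.QueryReadsMerkleSummary{MaxDegree: 2, MaxLevel: 3})

	for _, rqi := range []*kvrwset.RangeQueryInfo{rawRQI, hashedRQI} {
		// Validation branches on whichever member of the oneof is set, mirroring the
		// GetReadsMerkleHashes() check that validateRangeQuery performs further below.
		if summary := rqi.GetReadsMerkleHashes(); summary != nil {
			fmt.Printf("[%s, %s]: hash-based validation, maxDegree=%d\n",
				rqi.StartKey, rqi.EndKey, summary.MaxDegree)
			continue
		}
		fmt.Printf("[%s, %s]: raw-reads validation over %d recorded KVReads\n",
			rqi.StartKey, rqi.EndKey, len(rqi.GetRawReads().GetKvReads()))
	}
}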
package statebasedval import ( - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwset" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" "github.com/hyperledger/fabric/core/ledger/util" "github.com/hyperledger/fabric/protos/common" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" "github.com/hyperledger/fabric/protos/peer" putils "github.com/hyperledger/fabric/protos/utils" logging "github.com/op/go-logging" @@ -41,7 +42,7 @@ func NewValidator(db statedb.VersionedDB) *Validator { } //validate endorser transaction -func (v *Validator) validateEndorserTX(envBytes []byte, doMVCCValidation bool, updates *statedb.UpdateBatch) (*rwset.TxReadWriteSet, peer.TxValidationCode, error) { +func (v *Validator) validateEndorserTX(envBytes []byte, doMVCCValidation bool, updates *statedb.UpdateBatch) (*rwsetutil.TxRwSet, peer.TxValidationCode, error) { // extract actions from the envelope message respPayload, err := putils.GetActionFromEnvelope(envBytes) if err != nil { @@ -49,15 +50,16 @@ func (v *Validator) validateEndorserTX(envBytes []byte, doMVCCValidation bool, u } //preparation for extracting RWSet from transaction - txRWSet := &rwset.TxReadWriteSet{} + txRWSet := &rwsetutil.TxRwSet{} // Get the Result from the Action // and then Unmarshal it into a TxReadWriteSet using custom unmarshalling - if err = txRWSet.Unmarshal(respPayload.Results); err != nil { + + if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil { return nil, peer.TxValidationCode_INVALID_OTHER_REASON, nil } - var txResult peer.TxValidationCode = peer.TxValidationCode_VALID + txResult := peer.TxValidationCode_VALID //mvccvalidation, may invalidate transaction if doMVCCValidation { @@ -134,10 +136,8 @@ func (v *Validator) ValidateAndPrepareBatch(block *common.Block, doMVCCValidatio if err != nil { return nil, err - } else { - txsFilter.SetFlag(txIndex, peer.TxValidationCode_VALID) } - + txsFilter.SetFlag(txIndex, peer.TxValidationCode_VALID) } else { logger.Errorf("Skipping transaction %d that's not an endorsement or configuration %d", txIndex, chdr.Type) txsFilter.SetFlag(txIndex, peer.TxValidationCode_UNKNOWN_TX_TYPE) @@ -156,10 +156,10 @@ func (v *Validator) ValidateAndPrepareBatch(block *common.Block, doMVCCValidatio return updates, nil } -func addWriteSetToBatch(txRWSet *rwset.TxReadWriteSet, txHeight *version.Height, batch *statedb.UpdateBatch) { - for _, nsRWSet := range txRWSet.NsRWs { +func addWriteSetToBatch(txRWSet *rwsetutil.TxRwSet, txHeight *version.Height, batch *statedb.UpdateBatch) { + for _, nsRWSet := range txRWSet.NsRwSets { ns := nsRWSet.NameSpace - for _, kvWrite := range nsRWSet.Writes { + for _, kvWrite := range nsRWSet.KvRwSet.Writes { if kvWrite.IsDelete { batch.Delete(ns, kvWrite.Key, txHeight) } else { @@ -169,29 +169,27 @@ func addWriteSetToBatch(txRWSet *rwset.TxReadWriteSet, txHeight *version.Height, } } -func (v *Validator) validateTx(txRWSet *rwset.TxReadWriteSet, updates *statedb.UpdateBatch) (peer.TxValidationCode, error) { - for _, nsRWSet := range txRWSet.NsRWs { +func (v *Validator) validateTx(txRWSet *rwsetutil.TxRwSet, updates *statedb.UpdateBatch) (peer.TxValidationCode, error) { + for _, nsRWSet := range txRWSet.NsRwSets { ns := nsRWSet.NameSpace - if valid, err := v.validateReadSet(ns, nsRWSet.Reads, updates); !valid || err != nil { + if valid, err := v.validateReadSet(ns, nsRWSet.KvRwSet.Reads, 
updates); !valid || err != nil { if err != nil { return peer.TxValidationCode(-1), err - } else { - return peer.TxValidationCode_MVCC_READ_CONFLICT, nil } + return peer.TxValidationCode_MVCC_READ_CONFLICT, nil } - if valid, err := v.validateRangeQueries(ns, nsRWSet.RangeQueriesInfo, updates); !valid || err != nil { + if valid, err := v.validateRangeQueries(ns, nsRWSet.KvRwSet.RangeQueriesInfo, updates); !valid || err != nil { if err != nil { return peer.TxValidationCode(-1), err - } else { - return peer.TxValidationCode_PHANTOM_READ_CONFLICT, nil } + return peer.TxValidationCode_PHANTOM_READ_CONFLICT, nil } } return peer.TxValidationCode_VALID, nil } -func (v *Validator) validateReadSet(ns string, kvReads []*rwset.KVRead, updates *statedb.UpdateBatch) (bool, error) { +func (v *Validator) validateReadSet(ns string, kvReads []*kvrwset.KVRead, updates *statedb.UpdateBatch) (bool, error) { for _, kvRead := range kvReads { if valid, err := v.validateKVRead(ns, kvRead, updates); !valid || err != nil { return valid, err @@ -203,7 +201,7 @@ func (v *Validator) validateReadSet(ns string, kvReads []*rwset.KVRead, updates // validateKVRead performs mvcc check for a key read during transaction simulation. // i.e., it checks whether a key/version combination is already updated in the statedb (by an already committed block) // or in the updates (by a preceding valid transaction in the current block) -func (v *Validator) validateKVRead(ns string, kvRead *rwset.KVRead, updates *statedb.UpdateBatch) (bool, error) { +func (v *Validator) validateKVRead(ns string, kvRead *kvrwset.KVRead, updates *statedb.UpdateBatch) (bool, error) { if updates.Exists(ns, kvRead.Key) { return false, nil } @@ -215,7 +213,8 @@ func (v *Validator) validateKVRead(ns string, kvRead *rwset.KVRead, updates *sta if versionedValue != nil { committedVersion = versionedValue.Version } - if !version.AreSame(committedVersion, kvRead.Version) { + + if !version.AreSame(committedVersion, rwsetutil.NewVersion(kvRead.Version)) { logger.Debugf("Version mismatch for key [%s:%s]. 
Committed version = [%s], Version in readSet [%s]", ns, kvRead.Key, committedVersion, kvRead.Version) return false, nil @@ -223,7 +222,7 @@ func (v *Validator) validateKVRead(ns string, kvRead *rwset.KVRead, updates *sta return true, nil } -func (v *Validator) validateRangeQueries(ns string, rangeQueriesInfo []*rwset.RangeQueryInfo, updates *statedb.UpdateBatch) (bool, error) { +func (v *Validator) validateRangeQueries(ns string, rangeQueriesInfo []*kvrwset.RangeQueryInfo, updates *statedb.UpdateBatch) (bool, error) { for _, rqi := range rangeQueriesInfo { if valid, err := v.validateRangeQuery(ns, rqi, updates); !valid || err != nil { return valid, err @@ -236,7 +235,7 @@ func (v *Validator) validateRangeQueries(ns string, rangeQueriesInfo []*rwset.Ra // checks whether the results of the range query are still the same when executed on the // statedb (latest state as of last committed block) + updates (prepared by the writes of preceding valid transactions // in the current block and yet to be committed as part of group commit at the end of the validation of the block) -func (v *Validator) validateRangeQuery(ns string, rangeQueryInfo *rwset.RangeQueryInfo, updates *statedb.UpdateBatch) (bool, error) { +func (v *Validator) validateRangeQuery(ns string, rangeQueryInfo *kvrwset.RangeQueryInfo, updates *statedb.UpdateBatch) (bool, error) { logger.Debugf("validateRangeQuery: ns=%s, rangeQueryInfo=%s", ns, rangeQueryInfo) // If during simulation, the caller had not exhausted the iterator so @@ -251,7 +250,7 @@ func (v *Validator) validateRangeQuery(ns string, rangeQueryInfo *rwset.RangeQue } defer combinedItr.Close() var validator rangeQueryValidator - if rangeQueryInfo.ResultHash != nil { + if rangeQueryInfo.GetReadsMerkleHashes() != nil { logger.Debug(`Hashing results are present in the range query info hence, initiating hashing based validation`) validator = &rangeQueryHashValidator{} } else { diff --git a/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go b/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go index 414023939e2..8a259371eac 100644 --- a/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go +++ b/core/ledger/kvledger/txmgmt/validator/statebasedval/state_based_validator_test.go @@ -21,12 +21,13 @@ import ( "testing" "github.com/hyperledger/fabric/common/ledger/testutil" - "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwset" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb/stateleveldb" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" "github.com/hyperledger/fabric/core/ledger/util" "github.com/hyperledger/fabric/protos/common" + "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset" "github.com/spf13/viper" ) @@ -54,28 +55,30 @@ func TestValidator(t *testing.T) { validator := NewValidator(db) //rwset1 should be valid - rwset1 := rwset.NewRWSet() - rwset1.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) - rwset1.AddToReadSet("ns2", "key2", nil) - checkValidation(t, validator, []*rwset.RWSet{rwset1}, []int{}) + rwsetBuilder1 := rwsetutil.NewRWSetBuilder() + rwsetBuilder1.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) + rwsetBuilder1.AddToReadSet("ns2", "key2", nil) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder1.GetTxReadWriteSet()}, []int{}) //rwset2 should not be valid - 
rwset2 := rwset.NewRWSet() - rwset2.AddToReadSet("ns1", "key1", version.NewHeight(1, 2)) - checkValidation(t, validator, []*rwset.RWSet{rwset2}, []int{0}) + rwsetBuilder2 := rwsetutil.NewRWSetBuilder() + rwsetBuilder2.AddToReadSet("ns1", "key1", version.NewHeight(1, 2)) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder2.GetTxReadWriteSet()}, []int{0}) //rwset3 should not be valid - rwset3 := rwset.NewRWSet() - rwset3.AddToReadSet("ns1", "key1", nil) - checkValidation(t, validator, []*rwset.RWSet{rwset3}, []int{0}) + rwsetBuilder3 := rwsetutil.NewRWSetBuilder() + rwsetBuilder3.AddToReadSet("ns1", "key1", nil) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder3.GetTxReadWriteSet()}, []int{0}) // rwset4 and rwset5 within same block - rwset4 should be valid and makes rwset5 as invalid - rwset4 := rwset.NewRWSet() - rwset4.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) - rwset4.AddToWriteSet("ns1", "key1", []byte("value1_new")) - rwset5 := rwset.NewRWSet() - rwset5.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) - checkValidation(t, validator, []*rwset.RWSet{rwset4, rwset5}, []int{1}) + rwsetBuilder4 := rwsetutil.NewRWSetBuilder() + rwsetBuilder4.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) + rwsetBuilder4.AddToWriteSet("ns1", "key1", []byte("value1_new")) + + rwsetBuilder5 := rwsetutil.NewRWSetBuilder() + rwsetBuilder5.AddToReadSet("ns1", "key1", version.NewHeight(1, 1)) + checkValidation(t, validator, + []*rwsetutil.TxRwSet{rwsetBuilder4.GetTxReadWriteSet(), rwsetBuilder5.GetTxReadWriteSet()}, []int{1}) } func TestPhantomValidation(t *testing.T) { @@ -97,51 +100,59 @@ func TestPhantomValidation(t *testing.T) { validator := NewValidator(db) //rwset1 should be valid - rwset1 := rwset.NewRWSet() - rqi1 := &rwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: true} - rqi1.Results = []*rwset.KVRead{rwset.NewKVRead("key2", version.NewHeight(1, 2)), - rwset.NewKVRead("key3", version.NewHeight(1, 3))} - rwset1.AddToRangeQuerySet("ns1", rqi1) - checkValidation(t, validator, []*rwset.RWSet{rwset1}, []int{}) + rwsetBuilder1 := rwsetutil.NewRWSetBuilder() + rqi1 := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: true} + rqi1.SetRawReads([]*kvrwset.KVRead{ + rwsetutil.NewKVRead("key2", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key3", version.NewHeight(1, 3))}) + rwsetBuilder1.AddToRangeQuerySet("ns1", rqi1) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder1.GetTxReadWriteSet()}, []int{}) //rwset2 should not be valid - Version of key4 changed - rwset2 := rwset.NewRWSet() - rqi2 := &rwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} - rqi2.Results = []*rwset.KVRead{rwset.NewKVRead("key2", version.NewHeight(1, 2)), - rwset.NewKVRead("key3", version.NewHeight(1, 3)), - rwset.NewKVRead("key4", version.NewHeight(1, 3))} - rwset2.AddToRangeQuerySet("ns1", rqi2) - checkValidation(t, validator, []*rwset.RWSet{rwset2}, []int{1}) + rwsetBuilder2 := rwsetutil.NewRWSetBuilder() + rqi2 := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} + rqi2.SetRawReads([]*kvrwset.KVRead{ + rwsetutil.NewKVRead("key2", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key3", version.NewHeight(1, 3)), + rwsetutil.NewKVRead("key4", version.NewHeight(1, 3))}) + rwsetBuilder2.AddToRangeQuerySet("ns1", rqi2) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder2.GetTxReadWriteSet()}, []int{1}) //rwset3 should not be valid - simulate key3 got commited to db - 
rwset3 := rwset.NewRWSet() - rqi3 := &rwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} - rqi3.Results = []*rwset.KVRead{rwset.NewKVRead("key2", version.NewHeight(1, 2)), - rwset.NewKVRead("key4", version.NewHeight(1, 4))} - rwset3.AddToRangeQuerySet("ns1", rqi3) - checkValidation(t, validator, []*rwset.RWSet{rwset3}, []int{1}) + rwsetBuilder3 := rwsetutil.NewRWSetBuilder() + rqi3 := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} + rqi3.SetRawReads([]*kvrwset.KVRead{ + rwsetutil.NewKVRead("key2", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key4", version.NewHeight(1, 4))}) + rwsetBuilder3.AddToRangeQuerySet("ns1", rqi3) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder3.GetTxReadWriteSet()}, []int{1}) // //Remove a key in rwset4 and rwset5 should become invalid - rwset4 := rwset.NewRWSet() - rwset4.AddToWriteSet("ns1", "key3", nil) - rwset5 := rwset.NewRWSet() - rqi5 := &rwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} - rqi5.Results = []*rwset.KVRead{rwset.NewKVRead("key2", version.NewHeight(1, 2)), - rwset.NewKVRead("key3", version.NewHeight(1, 3)), - rwset.NewKVRead("key4", version.NewHeight(1, 4))} - rwset5.AddToRangeQuerySet("ns1", rqi5) - checkValidation(t, validator, []*rwset.RWSet{rwset4, rwset5}, []int{1}) + rwsetBuilder4 := rwsetutil.NewRWSetBuilder() + rwsetBuilder4.AddToWriteSet("ns1", "key3", nil) + rwsetBuilder5 := rwsetutil.NewRWSetBuilder() + rqi5 := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} + rqi5.SetRawReads([]*kvrwset.KVRead{ + rwsetutil.NewKVRead("key2", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key3", version.NewHeight(1, 3)), + rwsetutil.NewKVRead("key4", version.NewHeight(1, 4))}) + rwsetBuilder5.AddToRangeQuerySet("ns1", rqi5) + checkValidation(t, validator, []*rwsetutil.TxRwSet{ + rwsetBuilder4.GetTxReadWriteSet(), rwsetBuilder5.GetTxReadWriteSet()}, []int{1}) //Add a key in rwset6 and rwset7 should become invalid - rwset6 := rwset.NewRWSet() - rwset6.AddToWriteSet("ns1", "key2_1", []byte("value2_1")) - rwset7 := rwset.NewRWSet() - rqi7 := &rwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} - rqi7.Results = []*rwset.KVRead{rwset.NewKVRead("key2", version.NewHeight(1, 2)), - rwset.NewKVRead("key3", version.NewHeight(1, 3)), - rwset.NewKVRead("key4", version.NewHeight(1, 4))} - rwset7.AddToRangeQuerySet("ns1", rqi7) - checkValidation(t, validator, []*rwset.RWSet{rwset6, rwset7}, []int{1}) + rwsetBuilder6 := rwsetutil.NewRWSetBuilder() + rwsetBuilder6.AddToWriteSet("ns1", "key2_1", []byte("value2_1")) + + rwsetBuilder7 := rwsetutil.NewRWSetBuilder() + rqi7 := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key4", ItrExhausted: false} + rqi7.SetRawReads([]*kvrwset.KVRead{ + rwsetutil.NewKVRead("key2", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key3", version.NewHeight(1, 3)), + rwsetutil.NewKVRead("key4", version.NewHeight(1, 4))}) + rwsetBuilder7.AddToRangeQuerySet("ns1", rqi7) + checkValidation(t, validator, []*rwsetutil.TxRwSet{ + rwsetBuilder6.GetTxReadWriteSet(), rwsetBuilder7.GetTxReadWriteSet()}, []int{1}) } func TestPhantomHashBasedValidation(t *testing.T) { @@ -166,43 +177,43 @@ func TestPhantomHashBasedValidation(t *testing.T) { validator := NewValidator(db) - rwset1 := rwset.NewRWSet() - rqi1 := &rwset.RangeQueryInfo{StartKey: "key2", EndKey: "key9", ItrExhausted: true} - kvReadsDuringSimulation1 := []*rwset.KVRead{ - rwset.NewKVRead("key2", version.NewHeight(1, 2)), - 
rwset.NewKVRead("key3", version.NewHeight(1, 3)), - rwset.NewKVRead("key4", version.NewHeight(1, 4)), - rwset.NewKVRead("key5", version.NewHeight(1, 5)), - rwset.NewKVRead("key6", version.NewHeight(1, 6)), - rwset.NewKVRead("key7", version.NewHeight(1, 7)), - rwset.NewKVRead("key8", version.NewHeight(1, 8)), + rwsetBuilder1 := rwsetutil.NewRWSetBuilder() + rqi1 := &kvrwset.RangeQueryInfo{StartKey: "key2", EndKey: "key9", ItrExhausted: true} + kvReadsDuringSimulation1 := []*kvrwset.KVRead{ + rwsetutil.NewKVRead("key2", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key3", version.NewHeight(1, 3)), + rwsetutil.NewKVRead("key4", version.NewHeight(1, 4)), + rwsetutil.NewKVRead("key5", version.NewHeight(1, 5)), + rwsetutil.NewKVRead("key6", version.NewHeight(1, 6)), + rwsetutil.NewKVRead("key7", version.NewHeight(1, 7)), + rwsetutil.NewKVRead("key8", version.NewHeight(1, 8)), } - rqi1.ResultHash = buildTestHashResults(t, 2, kvReadsDuringSimulation1) - rwset1.AddToRangeQuerySet("ns1", rqi1) - checkValidation(t, validator, []*rwset.RWSet{rwset1}, []int{}) - - rwset2 := rwset.NewRWSet() - rqi2 := &rwset.RangeQueryInfo{StartKey: "key1", EndKey: "key9", ItrExhausted: false} - kvReadsDuringSimulation2 := []*rwset.KVRead{ - rwset.NewKVRead("key1", version.NewHeight(1, 1)), - rwset.NewKVRead("key2", version.NewHeight(1, 2)), - rwset.NewKVRead("key3", version.NewHeight(1, 2)), - rwset.NewKVRead("key4", version.NewHeight(1, 4)), - rwset.NewKVRead("key5", version.NewHeight(1, 5)), - rwset.NewKVRead("key6", version.NewHeight(1, 6)), - rwset.NewKVRead("key7", version.NewHeight(1, 7)), - rwset.NewKVRead("key8", version.NewHeight(1, 8)), - rwset.NewKVRead("key9", version.NewHeight(1, 9)), + rqi1.SetMerkelSummary(buildTestHashResults(t, 2, kvReadsDuringSimulation1)) + rwsetBuilder1.AddToRangeQuerySet("ns1", rqi1) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder1.GetTxReadWriteSet()}, []int{}) + + rwsetBuilder2 := rwsetutil.NewRWSetBuilder() + rqi2 := &kvrwset.RangeQueryInfo{StartKey: "key1", EndKey: "key9", ItrExhausted: false} + kvReadsDuringSimulation2 := []*kvrwset.KVRead{ + rwsetutil.NewKVRead("key1", version.NewHeight(1, 1)), + rwsetutil.NewKVRead("key2", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key3", version.NewHeight(1, 2)), + rwsetutil.NewKVRead("key4", version.NewHeight(1, 4)), + rwsetutil.NewKVRead("key5", version.NewHeight(1, 5)), + rwsetutil.NewKVRead("key6", version.NewHeight(1, 6)), + rwsetutil.NewKVRead("key7", version.NewHeight(1, 7)), + rwsetutil.NewKVRead("key8", version.NewHeight(1, 8)), + rwsetutil.NewKVRead("key9", version.NewHeight(1, 9)), } - rqi2.ResultHash = buildTestHashResults(t, 2, kvReadsDuringSimulation2) - rwset2.AddToRangeQuerySet("ns1", rqi2) - checkValidation(t, validator, []*rwset.RWSet{rwset2}, []int{1}) + rqi2.SetMerkelSummary(buildTestHashResults(t, 2, kvReadsDuringSimulation2)) + rwsetBuilder2.AddToRangeQuerySet("ns1", rqi2) + checkValidation(t, validator, []*rwsetutil.TxRwSet{rwsetBuilder2.GetTxReadWriteSet()}, []int{1}) } -func checkValidation(t *testing.T, validator *Validator, rwsets []*rwset.RWSet, invalidTxIndexes []int) { +func checkValidation(t *testing.T, validator *Validator, rwsets []*rwsetutil.TxRwSet, invalidTxIndexes []int) { simulationResults := [][]byte{} - for _, readWriteSet := range rwsets { - sr, err := readWriteSet.GetTxReadWriteSet().Marshal() + for _, txRWS := range rwsets { + sr, err := txRWS.ToProtoBytes() testutil.AssertNoError(t, err, "") simulationResults = append(simulationResults, sr) } @@ -221,11 +232,11 @@ func 
checkValidation(t *testing.T, validator *Validator, rwsets []*rwset.RWSet, //TODO Add the check for exact txnum that is marked invlid when bitarray is in place } -func buildTestHashResults(t *testing.T, maxDegree int, kvReads []*rwset.KVRead) *rwset.MerkleSummary { +func buildTestHashResults(t *testing.T, maxDegree int, kvReads []*kvrwset.KVRead) *kvrwset.QueryReadsMerkleSummary { if len(kvReads) <= maxDegree { t.Fatal("This method should be called with number of KVReads more than maxDegree; Else, hashing won't be performedrwset") } - helper, _ := rwset.NewRangeQueryResultsHelper(true, maxDegree) + helper, _ := rwsetutil.NewRangeQueryResultsHelper(true, uint32(maxDegree)) for _, kvRead := range kvReads { helper.AddResult(kvRead) } diff --git a/core/ledger/ledgerconfig/ledger_config.go b/core/ledger/ledgerconfig/ledger_config.go index 357406bc29d..348ff2e0087 100644 --- a/core/ledger/ledgerconfig/ledger_config.go +++ b/core/ledger/ledgerconfig/ledger_config.go @@ -113,6 +113,6 @@ func IsQueryReadsHashingEnabled() bool { // GetMaxDegreeQueryReadsHashing return the maximum degree of the merkle tree for hashes of // of range query results for phantom item validation // For more details - see description in kvledger/txmgmt/rwset/query_results_helper.go -func GetMaxDegreeQueryReadsHashing() int { +func GetMaxDegreeQueryReadsHashing() uint32 { return 50 } diff --git a/protos/ledger/rwset/kvrwset/helper.go b/protos/ledger/rwset/kvrwset/helper.go new file mode 100644 index 00000000000..bf8353d6a82 --- /dev/null +++ b/protos/ledger/rwset/kvrwset/helper.go @@ -0,0 +1,49 @@ +/* +Copyright IBM Corp. 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kvrwset + +import ( + "bytes" +) + +// SetRawReads sets the 'readsInfo' field to raw KVReads performed by the query +func (rqi *RangeQueryInfo) SetRawReads(kvReads []*KVRead) { + rqi.ReadsInfo = &RangeQueryInfo_RawReads{&QueryReads{kvReads}} +} + +// SetMerkelSummary sets the 'readsInfo' field to merkle summary of the raw KVReads of query results +func (rqi *RangeQueryInfo) SetMerkelSummary(merkleSummary *QueryReadsMerkleSummary) { + rqi.ReadsInfo = &RangeQueryInfo_ReadsMerkleHashes{merkleSummary} +} + +// Equal verifies whether the give MerkleSummary is equals to this +func (ms *QueryReadsMerkleSummary) Equal(anotherMS *QueryReadsMerkleSummary) bool { + if anotherMS == nil { + return false + } + if ms.MaxDegree != anotherMS.MaxDegree || + ms.MaxLevel != anotherMS.MaxLevel || + len(ms.MaxLevelHashes) != len(anotherMS.MaxLevelHashes) { + return false + } + for i := 0; i < len(ms.MaxLevelHashes); i++ { + if !bytes.Equal(ms.MaxLevelHashes[i], anotherMS.MaxLevelHashes[i]) { + return false + } + } + return true +} diff --git a/protos/ledger/rwset/kvrwset/kv_rwset.pb.go b/protos/ledger/rwset/kvrwset/kv_rwset.pb.go new file mode 100644 index 00000000000..f75d41569f5 --- /dev/null +++ b/protos/ledger/rwset/kvrwset/kv_rwset.pb.go @@ -0,0 +1,326 @@ +// Code generated by protoc-gen-go. +// source: ledger/rwset/kvrwset/kv_rwset.proto +// DO NOT EDIT! 
+ +/* +Package kvrwset is a generated protocol buffer package. + +It is generated from these files: + ledger/rwset/kvrwset/kv_rwset.proto + +It has these top-level messages: + KVRWSet + KVRead + KVWrite + Version + RangeQueryInfo + QueryReads + QueryReadsMerkleSummary +*/ +package kvrwset + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// KVRWSet encapsulates the read-write set for a chaincode that operates upon a KV or Document data model +type KVRWSet struct { + Reads []*KVRead `protobuf:"bytes,1,rep,name=reads" json:"reads,omitempty"` + RangeQueriesInfo []*RangeQueryInfo `protobuf:"bytes,2,rep,name=range_queries_info,json=rangeQueriesInfo" json:"range_queries_info,omitempty"` + Writes []*KVWrite `protobuf:"bytes,3,rep,name=writes" json:"writes,omitempty"` +} + +func (m *KVRWSet) Reset() { *m = KVRWSet{} } +func (m *KVRWSet) String() string { return proto.CompactTextString(m) } +func (*KVRWSet) ProtoMessage() {} +func (*KVRWSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *KVRWSet) GetReads() []*KVRead { + if m != nil { + return m.Reads + } + return nil +} + +func (m *KVRWSet) GetRangeQueriesInfo() []*RangeQueryInfo { + if m != nil { + return m.RangeQueriesInfo + } + return nil +} + +func (m *KVRWSet) GetWrites() []*KVWrite { + if m != nil { + return m.Writes + } + return nil +} + +// KVRead captures a read operation performed during transaction simulation +// A 'nil' version indicates a non-existing key read by the transaction +type KVRead struct { + Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *KVRead) Reset() { *m = KVRead{} } +func (m *KVRead) String() string { return proto.CompactTextString(m) } +func (*KVRead) ProtoMessage() {} +func (*KVRead) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *KVRead) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} + +// KVWrite captures a write (update/delete) operation performed during transaction simulation +type KVWrite struct { + Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + IsDelete bool `protobuf:"varint,2,opt,name=is_delete,json=isDelete" json:"is_delete,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KVWrite) Reset() { *m = KVWrite{} } +func (m *KVWrite) String() string { return proto.CompactTextString(m) } +func (*KVWrite) ProtoMessage() {} +func (*KVWrite) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Version encapsulates the version of a Key +// A version of a committed key is maintained as the height of the transaction that committed the key. 
+// The height is represenetd as a tuple where the txNum is the height of the transaction +// (starting with 1) within block +type Version struct { + BlockNum uint64 `protobuf:"varint,1,opt,name=block_num,json=blockNum" json:"block_num,omitempty"` + TxNum uint64 `protobuf:"varint,2,opt,name=tx_num,json=txNum" json:"tx_num,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// RangeQueryInfo encapsulates the details of a range query performed by a transaction during simulation. +// This helps protect transactions from phantom reads by varifying during validation whether any new items +// got committed within the given range between transaction simuation and validation +// (in addition to regular checks for updates/deletes of the existing items). +// readInfo field contains either the KVReads (for the items read by the range query) or a merkle-tree hash +// if the KVReads exceeds a pre-configured numbers +type RangeQueryInfo struct { + StartKey string `protobuf:"bytes,1,opt,name=start_key,json=startKey" json:"start_key,omitempty"` + EndKey string `protobuf:"bytes,2,opt,name=end_key,json=endKey" json:"end_key,omitempty"` + ItrExhausted bool `protobuf:"varint,3,opt,name=itr_exhausted,json=itrExhausted" json:"itr_exhausted,omitempty"` + // Types that are valid to be assigned to ReadsInfo: + // *RangeQueryInfo_RawReads + // *RangeQueryInfo_ReadsMerkleHashes + ReadsInfo isRangeQueryInfo_ReadsInfo `protobuf_oneof:"reads_info"` +} + +func (m *RangeQueryInfo) Reset() { *m = RangeQueryInfo{} } +func (m *RangeQueryInfo) String() string { return proto.CompactTextString(m) } +func (*RangeQueryInfo) ProtoMessage() {} +func (*RangeQueryInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type isRangeQueryInfo_ReadsInfo interface { + isRangeQueryInfo_ReadsInfo() +} + +type RangeQueryInfo_RawReads struct { + RawReads *QueryReads `protobuf:"bytes,4,opt,name=raw_reads,json=rawReads,oneof"` +} +type RangeQueryInfo_ReadsMerkleHashes struct { + ReadsMerkleHashes *QueryReadsMerkleSummary `protobuf:"bytes,5,opt,name=reads_merkle_hashes,json=readsMerkleHashes,oneof"` +} + +func (*RangeQueryInfo_RawReads) isRangeQueryInfo_ReadsInfo() {} +func (*RangeQueryInfo_ReadsMerkleHashes) isRangeQueryInfo_ReadsInfo() {} + +func (m *RangeQueryInfo) GetReadsInfo() isRangeQueryInfo_ReadsInfo { + if m != nil { + return m.ReadsInfo + } + return nil +} + +func (m *RangeQueryInfo) GetRawReads() *QueryReads { + if x, ok := m.GetReadsInfo().(*RangeQueryInfo_RawReads); ok { + return x.RawReads + } + return nil +} + +func (m *RangeQueryInfo) GetReadsMerkleHashes() *QueryReadsMerkleSummary { + if x, ok := m.GetReadsInfo().(*RangeQueryInfo_ReadsMerkleHashes); ok { + return x.ReadsMerkleHashes + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RangeQueryInfo) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RangeQueryInfo_OneofMarshaler, _RangeQueryInfo_OneofUnmarshaler, _RangeQueryInfo_OneofSizer, []interface{}{ + (*RangeQueryInfo_RawReads)(nil), + (*RangeQueryInfo_ReadsMerkleHashes)(nil), + } +} + +func _RangeQueryInfo_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RangeQueryInfo) + // reads_info + switch x := m.ReadsInfo.(type) { + case *RangeQueryInfo_RawReads: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RawReads); err != nil { + return err + } + case *RangeQueryInfo_ReadsMerkleHashes: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReadsMerkleHashes); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RangeQueryInfo.ReadsInfo has unexpected type %T", x) + } + return nil +} + +func _RangeQueryInfo_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RangeQueryInfo) + switch tag { + case 4: // reads_info.raw_reads + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryReads) + err := b.DecodeMessage(msg) + m.ReadsInfo = &RangeQueryInfo_RawReads{msg} + return true, err + case 5: // reads_info.reads_merkle_hashes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryReadsMerkleSummary) + err := b.DecodeMessage(msg) + m.ReadsInfo = &RangeQueryInfo_ReadsMerkleHashes{msg} + return true, err + default: + return false, nil + } +} + +func _RangeQueryInfo_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RangeQueryInfo) + // reads_info + switch x := m.ReadsInfo.(type) { + case *RangeQueryInfo_RawReads: + s := proto.Size(x.RawReads) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RangeQueryInfo_ReadsMerkleHashes: + s := proto.Size(x.ReadsMerkleHashes) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// QueryReads encapsulates the KVReads for the items read by a transaction as a result of a query execution +type QueryReads struct { + KvReads []*KVRead `protobuf:"bytes,1,rep,name=kv_reads,json=kvReads" json:"kv_reads,omitempty"` +} + +func (m *QueryReads) Reset() { *m = QueryReads{} } +func (m *QueryReads) String() string { return proto.CompactTextString(m) } +func (*QueryReads) ProtoMessage() {} +func (*QueryReads) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *QueryReads) GetKvReads() []*KVRead { + if m != nil { + return m.KvReads + } + return nil +} + +// QueryReadsMerkleSummary encapsulates the Merkle-tree hashes for the QueryReads +// This allows to reduce the size of RWSet in the presence of query results +// by storing certain hashes instead of actual results. 
+// maxDegree field refers to the maximum number of children in the tree at any level +// maxLevel field contains the lowest level which has lesser nodes than maxDegree (starting from leaf level) +type QueryReadsMerkleSummary struct { + MaxDegree uint32 `protobuf:"varint,1,opt,name=max_degree,json=maxDegree" json:"max_degree,omitempty"` + MaxLevel uint32 `protobuf:"varint,2,opt,name=max_level,json=maxLevel" json:"max_level,omitempty"` + MaxLevelHashes [][]byte `protobuf:"bytes,3,rep,name=max_level_hashes,json=maxLevelHashes,proto3" json:"max_level_hashes,omitempty"` +} + +func (m *QueryReadsMerkleSummary) Reset() { *m = QueryReadsMerkleSummary{} } +func (m *QueryReadsMerkleSummary) String() string { return proto.CompactTextString(m) } +func (*QueryReadsMerkleSummary) ProtoMessage() {} +func (*QueryReadsMerkleSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func init() { + proto.RegisterType((*KVRWSet)(nil), "kvrwset.KVRWSet") + proto.RegisterType((*KVRead)(nil), "kvrwset.KVRead") + proto.RegisterType((*KVWrite)(nil), "kvrwset.KVWrite") + proto.RegisterType((*Version)(nil), "kvrwset.Version") + proto.RegisterType((*RangeQueryInfo)(nil), "kvrwset.RangeQueryInfo") + proto.RegisterType((*QueryReads)(nil), "kvrwset.QueryReads") + proto.RegisterType((*QueryReadsMerkleSummary)(nil), "kvrwset.QueryReadsMerkleSummary") +} + +func init() { proto.RegisterFile("ledger/rwset/kvrwset/kv_rwset.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 534 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x53, 0x5d, 0x8b, 0xd3, 0x40, + 0x14, 0xdd, 0xb4, 0xdb, 0x26, 0xbd, 0x76, 0xd7, 0x3a, 0xab, 0x34, 0x20, 0x42, 0xc9, 0x22, 0x94, + 0x7d, 0x68, 0x61, 0x7d, 0x51, 0xc4, 0x17, 0xd9, 0x95, 0x4a, 0xb5, 0xe0, 0x2c, 0x74, 0xc1, 0x97, + 0x30, 0x6d, 0x6e, 0xdb, 0xd0, 0x7c, 0xac, 0x33, 0x93, 0x36, 0x7d, 0x12, 0x7f, 0x8a, 0xff, 0x54, + 0xe6, 0x26, 0x69, 0x5d, 0xa8, 0x3e, 0x65, 0xe6, 0x9c, 0x7b, 0xce, 0xdc, 0x39, 0xb9, 0x03, 0x97, + 0x11, 0x06, 0x4b, 0x94, 0x43, 0xb9, 0x55, 0xa8, 0x87, 0xeb, 0x4d, 0xf5, 0xf5, 0x69, 0x31, 0x78, + 0x90, 0xa9, 0x4e, 0x99, 0x5d, 0xe2, 0xde, 0x6f, 0x0b, 0xec, 0xf1, 0x94, 0xdf, 0xdf, 0xa1, 0x66, + 0xaf, 0xa1, 0x21, 0x51, 0x04, 0xca, 0xb5, 0x7a, 0xf5, 0xfe, 0x93, 0xeb, 0xa7, 0x83, 0xb2, 0x68, + 0x30, 0x9e, 0x72, 0x14, 0x01, 0x2f, 0x58, 0x76, 0x0b, 0x4c, 0x8a, 0x64, 0x89, 0xfe, 0x8f, 0x0c, + 0x65, 0x88, 0xca, 0x0f, 0x93, 0x45, 0xea, 0xd6, 0x48, 0xd3, 0xdd, 0x6b, 0xb8, 0x29, 0xf9, 0x96, + 0xa1, 0xdc, 0x7d, 0x4e, 0x16, 0x29, 0xef, 0xc8, 0x6a, 0x1f, 0xa2, 0x32, 0x08, 0xeb, 0x43, 0x73, + 0x2b, 0x43, 0x8d, 0xca, 0xad, 0x93, 0xb4, 0xf3, 0xd7, 0x71, 0xf7, 0x86, 0xe0, 0x25, 0xef, 0x7d, + 0x82, 0x66, 0xd1, 0x01, 0xeb, 0x40, 0x7d, 0x8d, 0x3b, 0xd7, 0xea, 0x59, 0xfd, 0x16, 0x37, 0x4b, + 0x76, 0x05, 0xf6, 0x06, 0xa5, 0x0a, 0xd3, 0xc4, 0xad, 0xf5, 0xac, 0x47, 0x36, 0xd3, 0x02, 0xe7, + 0x55, 0x81, 0x37, 0x31, 0x57, 0x25, 0xeb, 0x23, 0x46, 0x2f, 0xa1, 0x15, 0x2a, 0x3f, 0xc0, 0x08, + 0x35, 0x92, 0x95, 0xc3, 0x9d, 0x50, 0xdd, 0xd0, 0x9e, 0x3d, 0x87, 0xc6, 0x46, 0x44, 0x19, 0xba, + 0xf5, 0x9e, 0xd5, 0x6f, 0xf3, 0x62, 0xe3, 0x7d, 0x00, 0xbb, 0x3c, 0xc3, 0xa8, 0x67, 0x51, 0x3a, + 0x5f, 0xfb, 0x49, 0x16, 0x93, 0xeb, 0x29, 0x77, 0x08, 0x98, 0x64, 0x31, 0x7b, 0x01, 0x4d, 0x9d, + 0x13, 0x53, 0x23, 0xa6, 0xa1, 0xf3, 0x49, 0x16, 0x7b, 0xbf, 0x6a, 0x70, 0xfe, 0x38, 0x25, 0x63, + 0xa3, 0xb4, 0x90, 0xda, 0x3f, 0x34, 0xe7, 0x10, 0x30, 0xc6, 0x1d, 0xeb, 0x82, 0x8d, 0x49, 0x40, + 0x54, 0x8d, 0xa8, 0x26, 0x26, 0x81, 0x21, 0x2e, 0xe1, 
0x2c, 0xd4, 0xd2, 0xc7, 0x7c, 0x25, 0x32, + 0xa5, 0x31, 0xa0, 0x2e, 0x1d, 0xde, 0x0e, 0xb5, 0xbc, 0xad, 0x30, 0x76, 0x0d, 0x2d, 0x29, 0xb6, + 0x7e, 0xf1, 0x83, 0x4f, 0x29, 0xaa, 0x8b, 0x7d, 0x54, 0xd4, 0x81, 0x49, 0x58, 0x8d, 0x4e, 0xb8, + 0x23, 0xc5, 0x96, 0xd6, 0x8c, 0xc3, 0x05, 0xd5, 0xfb, 0x31, 0xca, 0x75, 0x84, 0xfe, 0x4a, 0xa8, + 0x15, 0x2a, 0xb7, 0x41, 0xea, 0xde, 0x11, 0xf5, 0x57, 0xaa, 0xbb, 0xcb, 0xe2, 0x58, 0xc8, 0xdd, + 0xe8, 0x84, 0x3f, 0x93, 0x07, 0x74, 0x44, 0xe2, 0x8f, 0x6d, 0x80, 0xc2, 0xd3, 0x4c, 0x8d, 0xf7, + 0x16, 0xe0, 0xa0, 0x66, 0x57, 0xe0, 0x98, 0x39, 0xfd, 0xdf, 0x0c, 0xda, 0xeb, 0x0d, 0xd5, 0x7a, + 0x3f, 0xa1, 0xfb, 0x8f, 0x73, 0xd9, 0x2b, 0x80, 0x58, 0xe4, 0x7e, 0x80, 0x4b, 0x89, 0x48, 0x31, + 0x9e, 0xf1, 0x56, 0x2c, 0xf2, 0x1b, 0x02, 0x4c, 0xc8, 0x86, 0x8e, 0x70, 0x83, 0x11, 0x25, 0x79, + 0xc6, 0x9d, 0x58, 0xe4, 0x5f, 0xcc, 0x9e, 0xf5, 0xa1, 0xb3, 0x27, 0xab, 0xfb, 0x9a, 0xf9, 0x6c, + 0xf3, 0xf3, 0xaa, 0xa6, 0xbc, 0xc8, 0xfb, 0xef, 0xef, 0x96, 0xa1, 0x5e, 0x65, 0xb3, 0xc1, 0x3c, + 0x8d, 0x87, 0xab, 0xdd, 0x03, 0xca, 0xf2, 0xe5, 0x2d, 0xc4, 0x4c, 0x86, 0xf3, 0x21, 0xbd, 0x34, + 0x35, 0x3c, 0xf6, 0x1c, 0x67, 0x4d, 0x22, 0xdf, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x88, + 0xb3, 0x32, 0xad, 0x03, 0x00, 0x00, +} diff --git a/protos/ledger/rwset/kvrwset/kv_rwset.proto b/protos/ledger/rwset/kvrwset/kv_rwset.proto new file mode 100644 index 00000000000..c132c5d28a9 --- /dev/null +++ b/protos/ledger/rwset/kvrwset/kv_rwset.proto @@ -0,0 +1,83 @@ +/* +Copyright IBM Corp. 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +syntax = "proto3"; + +option go_package = "github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset"; + +package kvrwset; + +// KVRWSet encapsulates the read-write set for a chaincode that operates upon a KV or Document data model +message KVRWSet { + repeated KVRead reads = 1; + repeated RangeQueryInfo range_queries_info = 2; + repeated KVWrite writes = 3; +} + +// KVRead captures a read operation performed during transaction simulation +// A 'nil' version indicates a non-existing key read by the transaction +message KVRead { + string key = 1; + Version version = 2; +} + +// KVWrite captures a write (update/delete) operation performed during transaction simulation +message KVWrite { + string key = 1; + bool is_delete = 2; + bytes value = 3; +} + +// Version encapsulates the version of a Key +// A version of a committed key is maintained as the height of the transaction that committed the key. +// The height is represenetd as a tuple where the txNum is the height of the transaction +// (starting with 1) within block +message Version { + uint64 block_num = 1; + uint64 tx_num = 2; +} + +// RangeQueryInfo encapsulates the details of a range query performed by a transaction during simulation. 
+// This helps protect transactions from phantom reads by verifying during validation whether any new items +// got committed within the given range between transaction simulation and validation +// (in addition to regular checks for updates/deletes of the existing items). +// readsInfo field contains either the KVReads (for the items read by the range query) or a merkle-tree hash +// if the KVReads exceed a pre-configured number +message RangeQueryInfo { + string start_key = 1; + string end_key = 2; + bool itr_exhausted = 3; + oneof reads_info { + QueryReads raw_reads = 4; + QueryReadsMerkleSummary reads_merkle_hashes = 5; + } +} + +// QueryReads encapsulates the KVReads for the items read by a transaction as a result of a query execution +message QueryReads { + repeated KVRead kv_reads = 1; +} + +// QueryReadsMerkleSummary encapsulates the Merkle-tree hashes for the QueryReads +// This allows reducing the size of RWSet in the presence of query results +// by storing certain hashes instead of actual results. +// maxDegree field refers to the maximum number of children in the tree at any level +// maxLevel field contains the lowest level which has fewer nodes than maxDegree (starting from leaf level) +message QueryReadsMerkleSummary { + uint32 max_degree = 1; + uint32 max_level = 2; + repeated bytes max_level_hashes = 3; +} \ No newline at end of file diff --git a/protos/ledger/rwset/rwset.pb.go b/protos/ledger/rwset/rwset.pb.go new file mode 100644 index 00000000000..cbf5c272079 --- /dev/null +++ b/protos/ledger/rwset/rwset.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go. +// source: ledger/rwset/rwset.proto +// DO NOT EDIT! + +/* +Package rwset is a generated protocol buffer package. + +It is generated from these files: + ledger/rwset/rwset.proto + +It has these top-level messages: + TxReadWriteSet + NsReadWriteSet +*/ +package rwset + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type TxReadWriteSet_DataModel int32 + +const ( + TxReadWriteSet_KV TxReadWriteSet_DataModel = 0 +) + +var TxReadWriteSet_DataModel_name = map[int32]string{ + 0: "KV", +} +var TxReadWriteSet_DataModel_value = map[string]int32{ + "KV": 0, +} + +func (x TxReadWriteSet_DataModel) String() string { + return proto.EnumName(TxReadWriteSet_DataModel_name, int32(x)) +} +func (TxReadWriteSet_DataModel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// TxReadWriteSet encapsulates a read-write set for a transaction +// DataModel specifies the enum value of the data model +// ns_rwset field specifies a list of chaincode specific read-write set (one for each chaincode) +type TxReadWriteSet struct { + DataModel TxReadWriteSet_DataModel `protobuf:"varint,1,opt,name=data_model,json=dataModel,enum=rwset.TxReadWriteSet_DataModel" json:"data_model,omitempty"` + NsRwset []*NsReadWriteSet `protobuf:"bytes,2,rep,name=ns_rwset,json=nsRwset" json:"ns_rwset,omitempty"` +} + +func (m *TxReadWriteSet) Reset() { *m = TxReadWriteSet{} } +func (m *TxReadWriteSet) String() string { return proto.CompactTextString(m) } +func (*TxReadWriteSet) ProtoMessage() {} +func (*TxReadWriteSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *TxReadWriteSet) GetNsRwset() []*NsReadWriteSet { + if m != nil { + return m.NsRwset + } + return nil +} + +// NsReadWriteSet encapsulates the read-write set for a chaincode +type NsReadWriteSet struct { + Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"` + Rwset []byte `protobuf:"bytes,2,opt,name=rwset,proto3" json:"rwset,omitempty"` +} + +func (m *NsReadWriteSet) Reset() { *m = NsReadWriteSet{} } +func (m *NsReadWriteSet) String() string { return proto.CompactTextString(m) } +func (*NsReadWriteSet) ProtoMessage() {} +func (*NsReadWriteSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*TxReadWriteSet)(nil), "rwset.TxReadWriteSet") + proto.RegisterType((*NsReadWriteSet)(nil), "rwset.NsReadWriteSet") + proto.RegisterEnum("rwset.TxReadWriteSet_DataModel", TxReadWriteSet_DataModel_name, TxReadWriteSet_DataModel_value) +} + +func init() { proto.RegisterFile("ledger/rwset/rwset.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 230 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0xc8, 0x49, 0x4d, 0x49, + 0x4f, 0x2d, 0xd2, 0x2f, 0x2a, 0x2f, 0x4e, 0x2d, 0x81, 0x90, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, + 0x42, 0xac, 0x60, 0x8e, 0xd2, 0x74, 0x46, 0x2e, 0xbe, 0x90, 0x8a, 0xa0, 0xd4, 0xc4, 0x94, 0xf0, + 0xa2, 0xcc, 0x92, 0xd4, 0xe0, 0xd4, 0x12, 0x21, 0x3b, 0x2e, 0xae, 0x94, 0xc4, 0x92, 0xc4, 0xf8, + 0xdc, 0xfc, 0x94, 0xd4, 0x1c, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x3e, 0x23, 0x79, 0x3d, 0x88, 0x5e, + 0x54, 0xa5, 0x7a, 0x2e, 0x89, 0x25, 0x89, 0xbe, 0x20, 0x65, 0x41, 0x9c, 0x29, 0x30, 0xa6, 0x90, + 0x01, 0x17, 0x47, 0x5e, 0x71, 0x3c, 0x58, 0xbd, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x28, + 0x54, 0xb7, 0x5f, 0x31, 0xb2, 0xee, 0x20, 0xf6, 0xbc, 0xe2, 0x20, 0xb0, 0x23, 0x84, 0xb9, 0x38, + 0xe1, 0x26, 0x09, 0xb1, 0x71, 0x31, 0x79, 0x87, 0x09, 0x30, 0x28, 0xb9, 0x70, 0xf1, 0xa1, 0xaa, + 0x17, 0x92, 0xe1, 0xe2, 0xcc, 0x4b, 0xcc, 0x4d, 0x2d, 0x2e, 0x48, 0x4c, 0x4e, 0x05, 0xbb, 0x8b, + 0x33, 0x08, 0x21, 0x20, 0x24, 0xc2, 0xc5, 0x0a, 0xb3, 0x93, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, + 0x32, 0x8e, 0x32, 0x4c, 0xcf, 
0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0xa8, + 0x2c, 0x48, 0x2d, 0x82, 0x06, 0x49, 0x5a, 0x62, 0x52, 0x51, 0x66, 0xb2, 0x3e, 0x38, 0x34, 0x8a, + 0xf5, 0x91, 0xc3, 0x29, 0x89, 0x0d, 0x2c, 0x68, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xde, 0xa5, + 0x9e, 0x0e, 0x3e, 0x01, 0x00, 0x00, +} diff --git a/protos/ledger/rwset/rwset.proto b/protos/ledger/rwset/rwset.proto new file mode 100644 index 00000000000..9228eb96dfd --- /dev/null +++ b/protos/ledger/rwset/rwset.proto @@ -0,0 +1,38 @@ +/* +Copyright IBM Corp. 2016 All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +syntax = "proto3"; + +option go_package = "github.com/hyperledger/fabric/protos/ledger/rwset"; + +package rwset; + +// TxReadWriteSet encapsulates a read-write set for a transaction +// DataModel specifies the enum value of the data model +// ns_rwset field specifies a list of chaincode specific read-write set (one for each chaincode) +message TxReadWriteSet { + enum DataModel { + KV = 0; + } + DataModel data_model = 1; + repeated NsReadWriteSet ns_rwset = 2; +} + +// NsReadWriteSet encapsulates the read-write set for a chaincode +message NsReadWriteSet { + string namespace = 1; + bytes rwset = 2; // Data model specific serialized proto message (e.g., kvrwset.KVRWSet for KV and Document data models) +} \ No newline at end of file
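A closing note on the wire format introduced by these protos. The read-write set is now serialized in two layers: the data-model-neutral envelope in rwset.proto (a TxReadWriteSet holding one NsReadWriteSet per chaincode, whose rwset field is opaque bytes) and the KV-specific kvrwset.KVRWSet that those bytes carry. The sketch below is a minimal, self-contained illustration of that layering using only the generated types listed above and the standard golang/protobuf API; rwsetutil.TxRwSet.ToProtoBytes and FromProtoBytes, which are used throughout the patch but whose implementation is not shown here, presumably perform the equivalent nesting and un-nesting.

package main

import (
	"fmt"

	proto "github.com/golang/protobuf/proto"

	"github.com/hyperledger/fabric/protos/ledger/rwset"
	"github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset"
)

func main() {
	// KV-level read-write set for a single, illustrative namespace "ns1".
	kvRWSet := &kvrwset.KVRWSet{
		Reads: []*kvrwset.KVRead{
			{Key: "key1", Version: &kvrwset.Version{BlockNum: 1, TxNum: 1}},
		},
		Writes: []*kvrwset.KVWrite{
			{Key: "key2", IsDelete: false, Value: []byte("value2")},
		},
	}

	// The per-namespace payload of the envelope is the serialized KVRWSet.
	kvBytes, err := proto.Marshal(kvRWSet)
	if err != nil {
		panic(err)
	}

	// Wrap it in the data-model-neutral envelope defined in rwset.proto.
	txRWSet := &rwset.TxReadWriteSet{
		DataModel: rwset.TxReadWriteSet_KV,
		NsRwset:   []*rwset.NsReadWriteSet{{Namespace: "ns1", Rwset: kvBytes}},
	}
	envelopeBytes, err := proto.Marshal(txRWSet)
	if err != nil {
		panic(err)
	}

	// Reverse direction: unwrap the envelope and then each per-namespace KVRWSet,
	// which is the kind of decoding a consumer of simulation results needs to do.
	decodedEnvelope := &rwset.TxReadWriteSet{}
	if err := proto.Unmarshal(envelopeBytes, decodedEnvelope); err != nil {
		panic(err)
	}
	for _, nsRWSet := range decodedEnvelope.NsRwset {
		decodedKV := &kvrwset.KVRWSet{}
		if err := proto.Unmarshal(nsRWSet.Rwset, decodedKV); err != nil {
			panic(err)
		}
		fmt.Printf("namespace=%s reads=%d writes=%d\n",
			nsRWSet.Namespace, len(decodedKV.Reads), len(decodedKV.Writes))
	}
}

Keeping the per-namespace payload opaque at the envelope level is what lets TxReadWriteSet.DataModel admit data models other than KV later without changing the envelope itself.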