Skip to content

Commit

Permalink
feat(consensus): additional sanity checks for the size of proposed blocks (backport #1408) (#2140)
Browse files Browse the repository at this point in the history

This is an automatic backport of pull request #1408 done by
[Mergify](https://mergify.com).
Cherry-pick of 28ad4d2 has failed:
```
On branch mergify/bp/v0.37.x/pr-1408
Your branch is up to date with 'origin/v0.37.x'.

You are currently cherry-picking commit 28ad4d2.
  (fix conflicts and run "git cherry-pick --continue")
  (use "git cherry-pick --skip" to skip this patch)
  (use "git cherry-pick --abort" to cancel the cherry-pick operation)

Changes to be committed:
	modified:   consensus/state.go
	modified:   crypto/merkle/proof.go
	modified:   evidence/pool_test.go
	modified:   state/execution_test.go
	modified:   types/part_set.go
	modified:   types/part_set_test.go

Unmerged paths:
  (use "git add/rm <file>..." as appropriate to mark resolution)
	deleted by us:   internal/consensus/errors.go
	deleted by us:   internal/consensus/state_test.go
	deleted by us:   internal/state/store_test.go
	deleted by us:   internal/store/store_test.go
	both modified:   types/event_bus_test.go

```


To fix up this pull request, you can check it out locally. See
documentation:
https://docs.github.com/en/github/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/checking-out-pull-requests-locally

---


<details>
<summary>Mergify commands and options</summary>

<br />

More conditions and actions can be found in the
[documentation](https://docs.mergify.com/).

You can also trigger Mergify actions by commenting on this pull request:

- `@Mergifyio refresh` will re-evaluate the rules
- `@Mergifyio rebase` will rebase this PR on its base branch
- `@Mergifyio update` will merge the base branch into this PR
- `@Mergifyio backport <destination>` will backport this PR on
`<destination>` branch

Additionally, on Mergify [dashboard](https://dashboard.mergify.com) you
can:

- look at your merge queues
- generate the Mergify configuration with the config editor.

Finally, you can contact us on https://mergify.com
</details>

---------

Co-authored-by: Daniel <daniel.cason@informal.systems>
Co-authored-by: Sergio Mena <sergio@informal.systems>
Co-authored-by: Andy Nogueira <me@andynogueira.dev>
Co-authored-by: Anton Kaliaev <anton.kalyaev@gmail.com>
  • Loading branch information
5 people committed Feb 9, 2024
1 parent f379190 commit 0401888
Show file tree
Hide file tree
Showing 8 changed files with 91 additions and 21 deletions.
10 changes: 10 additions & 0 deletions consensus/state.go
Expand Up @@ -36,6 +36,7 @@ var (
ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round")
ErrAddingVote = errors.New("error adding vote")
ErrSignatureFoundInPastBlocks = errors.New("found signature from the same key")
ErrProposalTooManyParts = errors.New("proposal block has too many parts")

errPubKeyIsNotSet = errors.New("pubkey is not set. Look for \"Can't get private validator pubkey\" errors")
)
Expand Down Expand Up @@ -1887,6 +1888,15 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
return ErrInvalidProposalSignature
}

// Validate the proposed block size, derived from its PartSetHeader
maxBytes := cs.state.ConsensusParams.Block.MaxBytes
if maxBytes == -1 {
maxBytes = int64(types.MaxBlockSizeBytes)
}
if int64(proposal.BlockID.PartSetHeader.Total) > (maxBytes-1)/int64(types.BlockPartSizeBytes)+1 {
return ErrProposalTooManyParts
}

proposal.Signature = p.Signature
cs.Proposal = proposal
// We don't update cs.ProposalBlockParts if it is already set.
Expand Down
16 changes: 14 additions & 2 deletions consensus/state_test.go
Expand Up @@ -253,7 +253,7 @@ func TestStateBadProposal(t *testing.T) {
}

func TestStateOversizedBlock(t *testing.T) {
const maxBytes = 2000
const maxBytes = int64(types.BlockPartSizeBytes)

for _, testCase := range []struct {
name string
Expand Down Expand Up @@ -299,14 +299,21 @@ func TestStateOversizedBlock(t *testing.T) {
totalBytes += len(part.Bytes)
}

maxBlockParts := maxBytes / int64(types.BlockPartSizeBytes)
if maxBytes > maxBlockParts*int64(types.BlockPartSizeBytes) {
maxBlockParts++
}
numBlockParts := int64(propBlockParts.Total())

if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}

// start the machine
startTestRound(cs1, height, round)

t.Log("Block Sizes;", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes)
t.Log("Block Sizes;", "Limit", maxBytes, "Current", totalBytes)
t.Log("Proposal Parts;", "Maximum", maxBlockParts, "Current", numBlockParts)

validateHash := propBlock.Hash()
lockedRound := int32(1)
Expand All @@ -322,6 +329,11 @@ func TestStateOversizedBlock(t *testing.T) {
ensurePrevote(voteCh, height, round)
validatePrevote(t, cs1, round, vss[0], validateHash)

// Should not accept a Proposal with too many block parts
if numBlockParts > maxBlockParts {
require.Nil(t, cs1.Proposal)
}

bps, err := propBlock.MakePartSet(partSize)
require.NoError(t, err)

Expand Down
8 changes: 4 additions & 4 deletions crypto/merkle/proof.go
Expand Up @@ -24,10 +24,10 @@ const (
// everything. This also affects the generalized proof system as
// well.
type Proof struct {
Total int64 `json:"total"` // Total number of items.
Index int64 `json:"index"` // Index of item to prove.
LeafHash []byte `json:"leaf_hash"` // Hash of item value.
Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child.
Total int64 `json:"total"` // Total number of items.
Index int64 `json:"index"` // Index of item to prove.
LeafHash []byte `json:"leaf_hash"` // Hash of item value.
Aunts [][]byte `json:"aunts,omitempty"` // Hashes from leaf's sibling to a root's child.
}

// ProofsFromByteSlices computes inclusion proof for given items.
Expand Down
3 changes: 1 addition & 2 deletions evidence/pool_test.go
Expand Up @@ -416,8 +416,7 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) (*store.Blo
block := state.MakeBlock(i, test.MakeNTxs(i, 1), lastCommit, nil, state.Validators.Proposer.Address)
block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute)
block.Header.Version = cmtversion.Consensus{Block: version.BlockProtocol, App: 1}
const parts = 1
partSet, err := block.MakePartSet(parts)
partSet, err := block.MakePartSet(types.BlockPartSizeBytes)
if err != nil {
return nil, err
}
Expand Down
2 changes: 1 addition & 1 deletion state/execution_test.go
Expand Up @@ -32,7 +32,7 @@ import (

var (
chainID = "execution_chain"
testPartSize uint32 = 65536
testPartSize uint32 = types.BlockPartSizeBytes
)

func TestApplyBlock(t *testing.T) {
Expand Down
32 changes: 22 additions & 10 deletions store/store_test.go
Expand Up @@ -2,6 +2,7 @@ package store

import (
"bytes"
"encoding/json"
"fmt"
stdlog "log"
"os"
Expand Down Expand Up @@ -139,9 +140,10 @@ func TestMain(m *testing.M) {
var cleanup cleanupFunc
var err error
state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
block = state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address)
txs := []types.Tx{make([]byte, types.BlockPartSizeBytes)} // TX taking one block part alone
block = state.MakeBlock(state.LastBlockHeight+1, txs, new(types.Commit), nil, state.Validators.GetProposer().Address)

partSet, err = block.MakePartSet(2)
partSet, err = block.MakePartSet(types.BlockPartSizeBytes)
if err != nil {
stdlog.Fatal(err)
}
Expand Down Expand Up @@ -169,10 +171,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
}
}

// save a block
block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address)
validPartSet, err := block.MakePartSet(2)
// save a block big enough to have two block parts
txs := []types.Tx{make([]byte, types.BlockPartSizeBytes)} // TX taking one block part alone
block := state.MakeBlock(bs.Height()+1, txs, new(types.Commit), nil, state.Validators.GetProposer().Address)
validPartSet, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
require.GreaterOrEqual(t, validPartSet.Total(), uint32(2))
part2 = validPartSet.GetPart(1)

seenCommit := makeTestCommit(10, cmttime.Now())
bs.SaveBlock(block, partSet, seenCommit)
require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed")
Expand Down Expand Up @@ -380,7 +386,7 @@ func TestLoadBaseMeta(t *testing.T) {

for h := int64(1); h <= 10; h++ {
block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address)
partSet, err := block.MakePartSet(2)
partSet, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
seenCommit := makeTestCommit(h, cmttime.Now())
bs.SaveBlock(block, partSet, seenCommit)
Expand Down Expand Up @@ -426,7 +432,13 @@ func TestLoadBlockPart(t *testing.T) {
gotPart, _, panicErr := doFn(loadPart)
require.Nil(t, panicErr, "an existent and proper block should not panic")
require.Nil(t, res, "a properly saved block should return a proper block")
require.Equal(t, gotPart.(*types.Part), part1,

// Having to do this because of https://github.com/stretchr/testify/issues/1141
gotPartJSON, err := json.Marshal(gotPart.(*types.Part))
require.NoError(t, err)
part1JSON, err := json.Marshal(part1)
require.NoError(t, err)
require.JSONEq(t, string(gotPartJSON), string(part1JSON),
"expecting successful retrieval of previously saved block")
}

Expand Down Expand Up @@ -454,7 +466,7 @@ func TestPruneBlocks(t *testing.T) {
// make more than 1000 blocks, to test batch deletions
for h := int64(1); h <= 1500; h++ {
block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address)
partSet, err := block.MakePartSet(2)
partSet, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
seenCommit := makeTestCommit(h, cmttime.Now())
bs.SaveBlock(block, partSet, seenCommit)
Expand Down Expand Up @@ -573,7 +585,7 @@ func TestLoadBlockMetaByHash(t *testing.T) {
bs := NewBlockStore(dbm.NewMemDB())

b1 := state.MakeBlock(state.LastBlockHeight+1, test.MakeNTxs(state.LastBlockHeight+1, 10), new(types.Commit), nil, state.Validators.GetProposer().Address)
partSet, err := b1.MakePartSet(2)
partSet, err := b1.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
seenCommit := makeTestCommit(1, cmttime.Now())
bs.SaveBlock(b1, partSet, seenCommit)
Expand All @@ -590,7 +602,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address)

partSet, err := block.MakePartSet(2)
partSet, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
seenCommit := makeTestCommit(10, cmttime.Now())
bs.SaveBlock(block, partSet, seenCommit)
Expand Down
13 changes: 12 additions & 1 deletion types/part_set.go
Expand Up @@ -18,6 +18,8 @@ import (
var (
ErrPartSetUnexpectedIndex = errors.New("error part set unexpected index")
ErrPartSetInvalidProof = errors.New("error part set invalid proof")
ErrPartTooBig = errors.New("error part size too big")
ErrPartInvalidSize = errors.New("error inner part with invalid size")
)

type Part struct {
Expand All @@ -29,7 +31,11 @@ type Part struct {
// ValidateBasic performs basic validation.
func (part *Part) ValidateBasic() error {
if len(part.Bytes) > int(BlockPartSizeBytes) {
return fmt.Errorf("too big: %d bytes, max: %d", len(part.Bytes), BlockPartSizeBytes)
return ErrPartTooBig
}
// All parts except the last one should have the same constant size.
if int64(part.Index) < part.Proof.Total-1 && len(part.Bytes) != int(BlockPartSizeBytes) {
return ErrPartInvalidSize
}
if err := part.Proof.ValidateBasic(); err != nil {
return fmt.Errorf("wrong Proof: %w", err)
Expand Down Expand Up @@ -280,6 +286,11 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) {
return false, nil
}

// The proof should be compatible with the number of parts.
if part.Proof.Total != int64(ps.total) {
return false, ErrPartSetInvalidProof
}

// Check hash proof
if part.Proof.Verify(ps.Hash(), part.Bytes) != nil {
return false, ErrPartSetInvalidProof
Expand Down
28 changes: 27 additions & 1 deletion types/part_set_test.go
Expand Up @@ -86,6 +86,22 @@ func TestWrongProof(t *testing.T) {
if added || err == nil {
t.Errorf("expected to fail adding a part with bad bytes.")
}

// Test adding a part with wrong proof index.
part = partSet.GetPart(2)
part.Proof.Index = 1
added, err = partSet2.AddPart(part)
if added || err == nil {
t.Errorf("expected to fail adding a part with bad proof index.")
}

// Test adding a part with wrong proof total.
part = partSet.GetPart(3)
part.Proof.Total = int64(partSet.Total() - 1)
added, err = partSet2.AddPart(part)
if added || err == nil {
t.Errorf("expected to fail adding a part with bad proof total.")
}
}

func TestPartSetHeaderValidateBasic(t *testing.T) {
Expand Down Expand Up @@ -117,9 +133,19 @@ func TestPartValidateBasic(t *testing.T) {
}{
{"Good Part", func(pt *Part) {}, false},
{"Too big part", func(pt *Part) { pt.Bytes = make([]byte, BlockPartSizeBytes+1) }, true},
{"Good small last part", func(pt *Part) {
pt.Index = 1
pt.Bytes = make([]byte, BlockPartSizeBytes-1)
pt.Proof.Total = 2
}, false},
{"Too small inner part", func(pt *Part) {
pt.Index = 0
pt.Bytes = make([]byte, BlockPartSizeBytes-1)
pt.Proof.Total = 2
}, true},
{"Too big proof", func(pt *Part) {
pt.Proof = merkle.Proof{
Total: 1,
Total: 2,
Index: 1,
LeafHash: make([]byte, 1024*1024),
}
Expand Down

0 comments on commit 0401888

Please sign in to comment.