Skip to content

Commit

Permalink
Rename contiguous => compact and message => sparse (#704)
Browse files Browse the repository at this point in the history
* chore: delete sims from root Makefile

* Rename contiguous to compact and message to sparse

Closes #688

* rename method receiver to match struct name

* revert: rename for celestia-core types
  • Loading branch information
rootulp committed Sep 14, 2022
1 parent cbda4f6 commit 7bbb270
Show file tree
Hide file tree
Showing 13 changed files with 127 additions and 126 deletions.
4 changes: 3 additions & 1 deletion app/split_shares.go
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ type shareSplitter struct {
msgWriter *coretypes.MessageShareWriter

// Since evidence will always be included in a block, we do not need to
// generate these shares lazily. Therefore instead of a ContiguousShareWriter
// generate these shares lazily. Therefore instead of a CompactShareWriter
// we use the normal eager mechanism
evdShares [][]byte

Expand All @@ -143,6 +143,8 @@ func newShareSplitter(txConf client.TxConfig, squareSize uint64, data *core.Data
panic(err)
}

// TODO: we should be able to use the CompactShareWriter and
// SparseShareWriter defined in pkg/shares here
sqwr.txWriter = coretypes.NewContiguousShareWriter(consts.TxNamespaceID)
sqwr.msgWriter = coretypes.NewMessageShareWriter()

Expand Down
8 changes: 4 additions & 4 deletions docs/architecture/ADR-001-ABCI++.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ Proposed and initial implementation is complete.

### [#631](https://github.com/celestiaorg/celestia-core/pull/631) Simplified version of ABCI++ (`celestia-core`)

Here we are adding only the two new methods that are necessary for the features that we need.
Here we are adding only the two new methods that are necessary for the features that we need.

```go
// Application is an interface that enables any finite, deterministic state machine
Expand Down Expand Up @@ -185,11 +185,11 @@ We estimate the square size by assuming that all the malleable transactions in t
In order to efficiently fill the data square and ensure that each message included in the block is paid for, we progressively generate the data square using a few new types. More details can be found in [#637](https://github.com/celestiaorg/celestia-core/pull/637)

```go
// ContiguousShareWriter lazily merges transaction or other contiguous types in
// CompactShareWriter lazily merges transaction or other compact types in
// the block data into shares that will eventually be included in a data square.
// It also has methods to help progressively count how many shares the transactions
// written take up.
type ContiguousShareWriter struct {
type CompactShareWriter struct {
shares []NamespacedShare
pendingShare NamespacedShare
namespace namespace.ID
Expand All @@ -211,7 +211,7 @@ These types are combined in a new celestia-app type, `shareSplitter`, which is r
// that message and their corresponding txs get written to the square
// atomically.
type shareSplitter struct {
txWriter *coretypes.ContiguousShareWriter
txWriter *coretypes.CompactShareWriter
msgWriter *coretypes.MessageShareWriter
...
}
Expand Down
24 changes: 12 additions & 12 deletions pkg/prove/proof_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,12 @@ import (

func TestTxInclusion(t *testing.T) {
typicalBlockData := types.Data{
Txs: generateRandomlySizedContiguousShares(100, 500),
Txs: generateRandomlySizedTxs(100, 500),
Messages: generateRandomlySizedMessages(40, 16000),
OriginalSquareSize: 64,
}
lotsOfTxsNoMessages := types.Data{
Txs: generateRandomlySizedContiguousShares(1000, 500),
Txs: generateRandomlySizedTxs(1000, 500),
OriginalSquareSize: 64,
}
overlappingSquareSize := 16
Expand Down Expand Up @@ -83,23 +83,23 @@ func TestTxSharePosition(t *testing.T) {
tests := []test{
{
name: "typical",
txs: generateRandomlySizedContiguousShares(44, 200),
txs: generateRandomlySizedTxs(44, 200),
},
{
name: "many small tx",
txs: generateRandomlySizedContiguousShares(444, 100),
txs: generateRandomlySizedTxs(444, 100),
},
{
name: "one small tx",
txs: generateRandomlySizedContiguousShares(1, 200),
txs: generateRandomlySizedTxs(1, 200),
},
{
name: "one large tx",
txs: generateRandomlySizedContiguousShares(1, 2000),
txs: generateRandomlySizedTxs(1, 2000),
},
{
name: "many large txs",
txs: generateRandomlySizedContiguousShares(100, 2000),
txs: generateRandomlySizedTxs(100, 2000),
},
}

Expand Down Expand Up @@ -137,7 +137,7 @@ func TestTxSharePosition(t *testing.T) {
func Test_genRowShares(t *testing.T) {
squareSize := uint64(16)
typicalBlockData := types.Data{
Txs: generateRandomlySizedContiguousShares(10, 200),
Txs: generateRandomlySizedTxs(10, 200),
Messages: generateRandomlySizedMessages(20, 1000),
OriginalSquareSize: squareSize,
}
Expand Down Expand Up @@ -173,7 +173,7 @@ func Test_genOrigRowShares(t *testing.T) {
txCount := 100
squareSize := uint64(16)
typicalBlockData := types.Data{
Txs: generateRandomlySizedContiguousShares(txCount, 200),
Txs: generateRandomlySizedTxs(txCount, 200),
Messages: generateRandomlySizedMessages(10, 1500),
OriginalSquareSize: squareSize,
}
Expand All @@ -197,19 +197,19 @@ func joinByteSlices(s ...[]byte) string {
return strings.Join(out, "")
}

func generateRandomlySizedContiguousShares(count, max int) types.Txs {
func generateRandomlySizedTxs(count, max int) types.Txs {
txs := make(types.Txs, count)
for i := 0; i < count; i++ {
size := rand.Intn(max)
if size == 0 {
size = 1
}
txs[i] = generateRandomContiguousShares(1, size)[0]
txs[i] = generateRandomTxs(1, size)[0]
}
return txs
}

func generateRandomContiguousShares(count, size int) types.Txs {
func generateRandomTxs(count, size int) types.Txs {
txs := make(types.Txs, count)
for i := 0; i < count; i++ {
tx := make([]byte, size)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,17 @@ import (
coretypes "github.com/tendermint/tendermint/types"
)

func TestContigShareWriter(t *testing.T) {
func TestCompactShareWriter(t *testing.T) {
// note that this test is mainly for debugging purposes, the main round trip
// tests occur in TestMerge and Test_processContiguousShares
w := NewContiguousShareSplitter(consts.TxNamespaceID)
txs := generateRandomContiguousShares(33, 200)
// tests occur in TestMerge and Test_processCompactShares
w := NewCompactShareSplitter(consts.TxNamespaceID)
txs := generateRandomCompactShares(33, 200)
for _, tx := range txs {
rawTx, _ := tx.MarshalDelimited()
w.WriteBytes(rawTx)
}
resShares := w.Export()
rawResTxs, err := processContiguousShares(resShares.RawShares())
rawResTxs, err := parseCompactShares(resShares.RawShares())
resTxs := coretypes.ToTxs(rawResTxs)
require.NoError(t, err)

Expand All @@ -31,7 +31,7 @@ func TestContigShareWriter(t *testing.T) {

func Test_parseDelimiter(t *testing.T) {
for i := uint64(0); i < 100; i++ {
tx := generateRandomContiguousShares(1, int(i))[0]
tx := generateRandomCompactShares(1, int(i))[0]
input, err := tx.MarshalDelimited()
if err != nil {
panic(err)
Expand All @@ -45,22 +45,22 @@ func Test_parseDelimiter(t *testing.T) {
}
}

func TestFuzz_processContiguousShares(t *testing.T) {
func TestFuzz_processCompactShares(t *testing.T) {
t.Skip()
// run random shares through processContiguousShares for a minute
// run random shares through processCompactShares for a minute
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
for {
select {
case <-ctx.Done():
return
default:
Test_processContiguousShares(t)
Test_processCompactShares(t)
}
}
}

func Test_processContiguousShares(t *testing.T) {
func Test_processCompactShares(t *testing.T) {
// exactTxShareSize is the length of tx that will fit exactly into a single
// share, accounting for namespace id and the length delimiter prepended to
// each tx
Expand Down Expand Up @@ -88,11 +88,11 @@ func Test_processContiguousShares(t *testing.T) {

// run the tests with identically sized txs
t.Run(fmt.Sprintf("%s identically sized ", tc.name), func(t *testing.T) {
txs := generateRandomContiguousShares(tc.txCount, tc.txSize)
txs := generateRandomCompactShares(tc.txCount, tc.txSize)

shares := SplitTxs(txs)

parsedTxs, err := processContiguousShares(shares)
parsedTxs, err := parseCompactShares(shares)
if err != nil {
t.Error(err)
}
Expand All @@ -105,11 +105,11 @@ func Test_processContiguousShares(t *testing.T) {

// run the same tests using randomly sized txs with caps of tc.txSize
t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
txs := generateRandomlySizedContiguousShares(tc.txCount, tc.txSize)
txs := generateRandomlySizedCompactShares(tc.txCount, tc.txSize)

shares := SplitTxs(txs)

parsedTxs, err := processContiguousShares(shares)
parsedTxs, err := parseCompactShares(shares)
if err != nil {
t.Error(err)
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/shares/non_interactive_defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ func FitsInSquare(cursor, squareSize int, msgShareLens ...int) (bool, int) {
if len(msgShareLens) > 0 {
firstMsgLen = msgShareLens[0]
}
// here we account for padding between the contiguous and message shares
// here we account for padding between the compact and sparse shares
cursor, _ = NextAlignedPowerOfTwo(cursor, firstMsgLen, squareSize)
sharesUsed, _ := MsgSharesUsedNIDefaults(cursor, squareSize, msgShareLens...)
return cursor+sharesUsed <= squareSize*squareSize, sharesUsed
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ import (
"github.com/tendermint/tendermint/pkg/consts"
)

// processContiguousShares takes raw shares and extracts out transactions,
// parseCompactShares takes raw shares and extracts out transactions,
// intermediate state roots, or evidence. The returned [][]byte do not have
// namespaces or length delimiters and are ready to be unmarshalled
func processContiguousShares(shares [][]byte) (txs [][]byte, err error) {
func parseCompactShares(shares [][]byte) (txs [][]byte, err error) {
if len(shares) == 0 {
return nil, nil
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,8 @@ import (
"github.com/celestiaorg/celestia-app/pkg/appconsts"
)

// parseMsgShares iterates through raw shares and separates the contiguous chunks
// of data. It is only used for Messages, i.e. shares with a non-reserved namespace.
func parseMsgShares(shares [][]byte) ([]coretypes.Message, error) {
// parseSparseShares iterates through raw shares and parses out individual messages.
func parseSparseShares(shares [][]byte) ([]coretypes.Message, error) {
if len(shares) == 0 {
return nil, nil
}
Expand Down
6 changes: 3 additions & 3 deletions pkg/shares/share_merging.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func Merge(eds *rsmt2d.ExtendedDataSquare) (coretypes.Data, error) {
// ParseTxs collects all of the transactions from the shares provided
func ParseTxs(shares [][]byte) (coretypes.Txs, error) {
// parse the shares
rawTxs, err := processContiguousShares(shares)
rawTxs, err := parseCompactShares(shares)
if err != nil {
return nil, err
}
Expand All @@ -95,7 +95,7 @@ func ParseTxs(shares [][]byte) (coretypes.Txs, error) {
func ParseEvd(shares [][]byte) (coretypes.EvidenceData, error) {
// the raw data returned does not have length delimiters or namespaces and
// is ready to be unmarshaled
rawEvd, err := processContiguousShares(shares)
rawEvd, err := parseCompactShares(shares)
if err != nil {
return coretypes.EvidenceData{}, err
}
Expand Down Expand Up @@ -123,7 +123,7 @@ func ParseEvd(shares [][]byte) (coretypes.EvidenceData, error) {

// ParseMsgs collects all messages from the shares provided
func ParseMsgs(shares [][]byte) (coretypes.Messages, error) {
msgList, err := parseMsgShares(shares)
msgList, err := parseSparseShares(shares)
if err != nil {
return coretypes.Messages{}, err
}
Expand Down
6 changes: 3 additions & 3 deletions pkg/shares/share_splitting.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,15 +86,15 @@ func ExtractShareIndexes(txs coretypes.Txs) []uint32 {
}

func SplitTxs(txs coretypes.Txs) [][]byte {
writer := NewContiguousShareSplitter(consts.TxNamespaceID)
writer := NewCompactShareSplitter(consts.TxNamespaceID)
for _, tx := range txs {
writer.WriteTx(tx)
}
return writer.Export().RawShares()
}

func SplitEvidence(evd coretypes.EvidenceList) ([][]byte, error) {
writer := NewContiguousShareSplitter(consts.EvidenceNamespaceID)
writer := NewCompactShareSplitter(consts.EvidenceNamespaceID)
var err error
for _, ev := range evd {
err = writer.WriteEvidence(ev)
Expand All @@ -109,7 +109,7 @@ func SplitMessages(indexes []uint32, msgs []coretypes.Message) ([][]byte, error)
if indexes != nil && len(indexes) != len(msgs) {
return nil, ErrIncorrectNumberOfIndexes
}
writer := NewMessageShareSplitter()
writer := NewSparseShareSplitter()
for i, msg := range msgs {
writer.Write(msg)
if indexes != nil && len(indexes) > i+1 {
Expand Down
10 changes: 5 additions & 5 deletions pkg/shares/shares_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@ func TestMerge(t *testing.T) {

func TestFuzz_Merge(t *testing.T) {
t.Skip()
// run random shares through processContiguousShares for a minute
// run random shares through processCompactShares for a minute
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
for {
Expand All @@ -277,25 +277,25 @@ func TestFuzz_Merge(t *testing.T) {
// generateRandomBlockData returns randomly generated block data for testing purposes
func generateRandomBlockData(txCount, evdCount, msgCount, maxSize int) coretypes.Data {
var out coretypes.Data
out.Txs = generateRandomlySizedContiguousShares(txCount, maxSize)
out.Txs = generateRandomlySizedCompactShares(txCount, maxSize)
out.Evidence = generateIdenticalEvidence(evdCount)
out.Messages = generateRandomlySizedMessages(msgCount, maxSize)
return out
}

func generateRandomlySizedContiguousShares(count, max int) coretypes.Txs {
func generateRandomlySizedCompactShares(count, max int) coretypes.Txs {
txs := make(coretypes.Txs, count)
for i := 0; i < count; i++ {
size := rand.Intn(max)
if size == 0 {
size = 1
}
txs[i] = generateRandomContiguousShares(1, size)[0]
txs[i] = generateRandomCompactShares(1, size)[0]
}
return txs
}

func generateRandomContiguousShares(count, size int) coretypes.Txs {
func generateRandomCompactShares(count, size int) coretypes.Txs {
txs := make(coretypes.Txs, count)
for i := 0; i < count; i++ {
tx := make([]byte, size)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import (
coretypes "github.com/tendermint/tendermint/types"
)

func Test_parseMsgShares(t *testing.T) {
func Test_parseSparseShares(t *testing.T) {
// exactMsgShareSize is the length of message that will fit exactly into a single
// share, accounting for namespace id and the length delimiter prepended to
// each message
Expand Down Expand Up @@ -47,7 +47,7 @@ func Test_parseMsgShares(t *testing.T) {

shares, _ := SplitMessages(nil, msgs.MessagesList)

parsedMsgs, err := parseMsgShares(shares)
parsedMsgs, err := parseSparseShares(shares)
if err != nil {
t.Error(err)
}
Expand All @@ -64,7 +64,7 @@ func Test_parseMsgShares(t *testing.T) {
msgs := generateRandomlySizedMessages(tc.msgCount, tc.msgSize)
shares, _ := SplitMessages(nil, msgs.MessagesList)

parsedMsgs, err := parseMsgShares(shares)
parsedMsgs, err := parseSparseShares(shares)
if err != nil {
t.Error(err)
}
Expand All @@ -79,7 +79,7 @@ func Test_parseMsgShares(t *testing.T) {
}

func TestParsePaddedMsg(t *testing.T) {
msgWr := NewMessageShareSplitter()
msgWr := NewSparseShareSplitter()
randomSmallMsg := generateRandomMessage(100)
randomLargeMsg := generateRandomMessage(10000)
msgs := coretypes.Messages{
Expand All @@ -93,7 +93,7 @@ func TestParsePaddedMsg(t *testing.T) {
msgWr.WriteNamespacedPaddedShares(4)
msgWr.Write(msgs.MessagesList[1])
msgWr.WriteNamespacedPaddedShares(10)
pmsgs, err := parseMsgShares(msgWr.Export().RawShares())
pmsgs, err := parseSparseShares(msgWr.Export().RawShares())
require.NoError(t, err)
require.Equal(t, msgs.MessagesList, pmsgs)
}
Loading

0 comments on commit 7bbb270

Please sign in to comment.