diff --git a/client_test.go b/client_test.go index ab8d49c..5ecf35f 100644 --- a/client_test.go +++ b/client_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "cosmossdk.io/math" "github.com/ory/dockertest/v3" "github.com/stretchr/testify/suite" @@ -127,22 +126,21 @@ func (t *TestSuite) TestRoundTrip() { blobBlob, err := blob.NewBlobV0(namespace, data) t.Require().NoError(err) + com, err := blob.CreateCommitment(blobBlob) + t.Require().NoError(err) + // write blob to DA - txResponse, err := client.State.SubmitPayForBlob(ctx, math.NewInt(100000000), 200000000, []*blob.Blob{blobBlob}) + height, err := client.Blob.Submit(ctx, []*blob.Blob{blobBlob}) t.Require().NoError(err) - t.Require().NotNil(txResponse) - t.Zero(txResponse.Code) - t.NotZero(txResponse.Height) + t.Require().NotZero(height) ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) defer cancel() // retrieve data back from DA - blobs, err := client.Blob.GetAll(ctx, uint64(txResponse.Height), []share.Namespace{namespace}) + daBlob, err := client.Blob.Get(ctx, height, namespace, com) t.Require().NoError(err) - t.Require().NotEmpty(blobs) - t.Len(blobs, 1) - t.Require().NotNil(blobs[0]) - t.Equal(data, blobs[0].Data) + t.Require().NotNil(daBlob) + t.Equal(data, daBlob.Data) } func (t *TestSuite) getRPCAddress() string { diff --git a/go.mod b/go.mod index 2372e5e..9c26a7e 100644 --- a/go.mod +++ b/go.mod @@ -7,13 +7,14 @@ require ( github.com/celestiaorg/go-fraud v0.1.2 github.com/celestiaorg/go-header v0.2.12 github.com/celestiaorg/nmt v0.17.0 - github.com/celestiaorg/rsmt2d v0.9.0 + github.com/celestiaorg/rsmt2d v0.10.0 github.com/cometbft/cometbft v0.37.0 github.com/filecoin-project/go-jsonrpc v0.3.1 github.com/gogo/protobuf v1.3.2 github.com/libp2p/go-libp2p v0.27.3 github.com/ory/dockertest/v3 v3.10.0 github.com/stretchr/testify v1.8.4 + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 ) require ( @@ -21,11 +22,11 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/benbjohnson/clock v1.3.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/containerd/continuity v0.4.1 // indirect - github.com/cosmos/gogoproto v1.4.1 // indirect + github.com/cosmos/gogoproto v1.4.10 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/docker/cli v24.0.2+incompatible // indirect @@ -38,9 +39,11 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/ipfs/go-cid v0.4.1 // indirect @@ -70,7 +73,7 @@ require ( github.com/opencontainers/image-spec v1.1.0-rc2 // indirect github.com/opencontainers/runc v1.1.7 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/petermattis/goid 
v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect @@ -88,18 +91,18 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/crypto v0.10.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/net v0.11.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.9.0 // indirect golang.org/x/text v0.10.0 // indirect golang.org/x/tools v0.10.0 // indirect - golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect - google.golang.org/grpc v1.52.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.4.0 // indirect lukechampine.com/blake3 v1.1.7 // indirect ) diff --git a/go.sum b/go.sum index f2a99e1..8a74cc4 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= -github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/celestiaorg/go-fraud v0.1.2 h1:Bf7yIN3lZ4IR/Vlu5OtmcVCVNESBKEJ/xwu28rRKGA8= @@ -24,8 +24,8 @@ github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= github.com/celestiaorg/nmt v0.17.0 h1:/k8YLwJvuHgT/jQ435zXKaDX811+sYEMXL4B/vYdSLU= github.com/celestiaorg/nmt v0.17.0/go.mod h1:ZndCeAR4l9lxm7W51ouoyTo1cxhtFgK+4DpEIkxRA3A= -github.com/celestiaorg/rsmt2d v0.9.0 h1:kon78I748ZqjNzI8OAqPN+2EImuZuanj/6gTh8brX3o= -github.com/celestiaorg/rsmt2d v0.9.0/go.mod h1:E06nDxfoeBDltWRvTR9dLviiUZI5/6mLXAuhSJzz3Iw= +github.com/celestiaorg/rsmt2d v0.10.0 h1:8dprr6CW5mCk5YPnbiLdirojw9YsJOE+XB+GORb8sT0= +github.com/celestiaorg/rsmt2d v0.10.0/go.mod h1:BiCZkCJfhDHUEOJKXUeu+CudjluecKvRTqHcuxKvodc= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -39,8 +39,8 @@ github.com/containerd/cgroups v1.1.0 
h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaD github.com/containerd/continuity v0.4.1 h1:wQnVrjIyQ8vhU2sgOiL5T07jo+ouqc2bnKsv5/EqGhU= github.com/containerd/continuity v0.4.1/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/cosmos/gogoproto v1.4.1 h1:WoyH+0/jbCTzpKNvyav5FL1ZTWsp1im1MxEpJEzKUB8= -github.com/cosmos/gogoproto v1.4.1/go.mod h1:Ac9lzL4vFpBMcptJROQ6dQ4M3pOEK5Z/l0Q9p+LoCr4= +github.com/cosmos/gogoproto v1.4.10 h1:QH/yT8X+c0F4ZDacDv3z+xE3WU1P1Z3wQoLMBRJoKuI= +github.com/cosmos/gogoproto v1.4.10/go.mod h1:3aAZzeRWpAwr+SS/LLkICX2/kDFyaYVzckBDzygIxek= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -105,6 +105,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= @@ -115,7 +116,8 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= @@ -207,8 +209,9 @@ github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4a github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -317,6 +320,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -339,6 +343,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= @@ -346,22 +351,22 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto 
v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -373,8 +378,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -385,8 +390,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= diff --git a/types/appconsts/global_consts.go b/types/appconsts/global_consts.go index 1d6c9f8..1c99191 100644 --- a/types/appconsts/global_consts.go +++ b/types/appconsts/global_consts.go @@ -75,4 +75,7 @@ const ( var ( // DefaultCodec is the default codec creator used for data erasure. 
DefaultCodec = rsmt2d.NewLeoRSCodec + + // SupportedShareVersions is a list of supported share versions. + SupportedShareVersions = []uint8{ShareVersionZero} ) diff --git a/types/appconsts/v1/app_consts.go b/types/appconsts/v1/app_consts.go new file mode 100644 index 0000000..72b040f --- /dev/null +++ b/types/appconsts/v1/app_consts.go @@ -0,0 +1,7 @@ +package v1 + +const ( + Version uint64 = 1 + SquareSizeUpperBound int = 128 + SubtreeRootThreshold int = 64 +) diff --git a/types/appconsts/versioned_consts.go b/types/appconsts/versioned_consts.go new file mode 100644 index 0000000..9e9a66f --- /dev/null +++ b/types/appconsts/versioned_consts.go @@ -0,0 +1,31 @@ +package appconsts + +import v1 "github.com/rollkit/celestia-openrpc/types/appconsts/v1" + +const ( + LatestVersion = v1.Version +) + +// SubtreeRootThreshold works as a target upper bound for the number of subtree +// roots in the share commitment. If a blob contains more shares than this +// number, then the height of the subtree roots will increase by one so that the +// number of subtree roots in the share commitment decreases by a factor of two. +// This step is repeated until the number of subtree roots is less than the +// SubtreeRootThreshold. +// +// The rationale for this value is described in more detail in ADR-013. +func SubtreeRootThreshold(_ uint64) int { + return v1.SubtreeRootThreshold +} + +// SquareSizeUpperBound is the maximum original square width possible +// for a version of the state machine. The maximum is decided through +// governance. See `DefaultGovMaxSquareSize`. +func SquareSizeUpperBound(_ uint64) int { + return v1.SquareSizeUpperBound +} + +var ( + DefaultSubtreeRootThreshold = SubtreeRootThreshold(LatestVersion) + DefaultSquareSizeUpperBound = SquareSizeUpperBound(LatestVersion) +) diff --git a/types/blob/blob.go b/types/blob/blob.go index 5dae022..101ecd6 100644 --- a/types/blob/blob.go +++ b/types/blob/blob.go @@ -79,8 +79,16 @@ func (p *Proof) UnmarshalJSON(data []byte) error { return nil } +type jsonBlob struct { + Namespace share.Namespace `json:"namespace"` + Data []byte `json:"data"` + ShareVersion uint32 `json:"share_version"` + Commitment Commitment `json:"commitment"` +} + // Blob represents any application-specific binary data that anyone can submit to Celestia. type Blob struct { + // NOTE: Namespace _must_ include both version and id bytes Namespace []byte `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` ShareVersion uint32 `protobuf:"varint,3,opt,name=share_version,json=shareVersion,proto3" json:"share_version,omitempty"` @@ -88,37 +96,6 @@ type Blob struct { Commitment []byte `protobuf:"bytes,5,opt,name=commitment,proto3" json:"commitment,omitempty"` } -// NewBlobV0 constructs a new blob from the provided Namespace and data. -// The blob will be formatted as v0 shares. -func NewBlobV0(namespace share.Namespace, data []byte) (*Blob, error) { - return NewBlob(appconsts.ShareVersionZero, namespace, data) -} - -// NewBlob constructs a new blob from the provided Namespace, data and share version. 
-func NewBlob(shareVersion uint8, namespace share.Namespace, data []byte) (*Blob, error) { - if len(data) == 0 || len(data) > appconsts.DefaultMaxBytes { - return nil, fmt.Errorf("blob data must be > 0 && <= %d, but it was %d bytes", appconsts.DefaultMaxBytes, len(data)) - } - if err := namespace.ValidateForBlob(); err != nil { - return nil, err - } - - return &Blob{ - Namespace: namespace, - Data: data, - ShareVersion: uint32(shareVersion), - NamespaceVersion: 0, - Commitment: []byte{}, - }, nil -} - -type jsonBlob struct { - Namespace share.Namespace `json:"namespace"` - Data []byte `json:"data"` - ShareVersion uint32 `json:"share_version"` - Commitment Commitment `json:"commitment"` -} - func (b *Blob) MarshalJSON() ([]byte, error) { blob := &jsonBlob{ Namespace: b.Namespace, @@ -135,7 +112,6 @@ func (b *Blob) UnmarshalJSON(data []byte) error { if err != nil { return err } - b.NamespaceVersion = uint32(blob.Namespace.Version()) b.Data = blob.Data b.ShareVersion = blob.ShareVersion @@ -143,3 +119,27 @@ func (b *Blob) UnmarshalJSON(data []byte) error { b.Namespace = blob.Namespace return nil } + +// NewBlobV0 constructs a new blob from the provided Namespace and data. +// The blob will be formatted as v0 shares. +func NewBlobV0(namespace share.Namespace, data []byte) (*Blob, error) { + return NewBlob(appconsts.ShareVersionZero, namespace, data) +} + +// NewBlob constructs a new blob from the provided Namespace, data and share version. +func NewBlob(shareVersion uint8, namespace share.Namespace, data []byte) (*Blob, error) { + if len(data) == 0 || len(data) > appconsts.DefaultMaxBytes { + return nil, fmt.Errorf("blob data must be > 0 && <= %d, but it was %d bytes", appconsts.DefaultMaxBytes, len(data)) + } + if err := namespace.ValidateForBlob(); err != nil { + return nil, err + } + + return &Blob{ + Namespace: namespace, + Data: data, + ShareVersion: uint32(shareVersion), + NamespaceVersion: 0, + Commitment: []byte{}, + }, nil +} diff --git a/types/blob/blob_test.go b/types/blob/blob_test.go new file mode 100644 index 0000000..656db3f --- /dev/null +++ b/types/blob/blob_test.go @@ -0,0 +1,48 @@ +package blob + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/rollkit/celestia-openrpc/types/appconsts" + appns "github.com/rollkit/celestia-openrpc/types/namespace" +) + +func TestBlobMarshalUnmarshal(t *testing.T) { + tests := []struct { + name string + blobJSON string + blob *Blob + }{ + { + "valid blob", + `{"namespace":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAQIDBAUGBwg=","data":"aGVsbG8gd29ybGQ=","share_version":0,"commitment":"I6VBbcCIpcliy0hYTCLdX13m18ImVdABclJupNGueko="}`, + &Blob{ + Namespace: append(bytes.Repeat([]byte{0x00}, 21), []byte{1, 2, 3, 4, 5, 6, 7, 8}...), + Data: []byte("hello world"), + ShareVersion: uint32(appconsts.ShareVersionZero), + NamespaceVersion: uint32(appns.NamespaceVersionZero), + Commitment: []byte{0x23, 0xa5, 0x41, 0x6d, 0xc0, 0x88, 0xa5, 0xc9, 0x62, 0xcb, 0x48, 0x58, 0x4c, 0x22, 0xdd, 0x5f, 0x5d, 0xe6, 0xd7, 0xc2, 0x26, 0x55, 0xd0, 0x1, 0x72, 0x52, 0x6e, 0xa4, 0xd1, 0xae, 0x7a, 0x4a}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + blob := &Blob{} + err := blob.UnmarshalJSON([]byte(tt.blobJSON)) + require.NoError(t, err) + + require.Equal(t, tt.blob.ShareVersion, blob.ShareVersion) + require.Equal(t, tt.blob.NamespaceVersion, blob.NamespaceVersion) + require.Equal(t, tt.blob.Namespace, blob.Namespace) + require.Equal(t, tt.blob.Data, blob.Data) + require.Equal(t, tt.blob.Commitment, 
blob.Commitment) + + blobJSON, err := blob.MarshalJSON() + require.NoError(t, err) + require.Equal(t, tt.blobJSON, string(blobJSON)) + }) + } +} diff --git a/types/blob/commitment.go b/types/blob/commitment.go new file mode 100644 index 0000000..406605a --- /dev/null +++ b/types/blob/commitment.go @@ -0,0 +1,110 @@ +package blob + +import ( + "crypto/sha256" + + "github.com/celestiaorg/nmt" + + "github.com/rollkit/celestia-openrpc/types/appconsts" + "github.com/rollkit/celestia-openrpc/types/share" +) + +// CreateCommitment generates the share commitment for a given blob. +// See [data square layout rationale] and [blob share commitment rules]. +// +// [data square layout rationale]: ../../specs/src/specs/data_square_layout.md +// [blob share commitment rules]: ../../specs/src/specs/data_square_layout.md#blob-share-commitment-rules +// NOTE: We assume Blob.Namespace contains both id and version bytes +func CreateCommitment(blob *Blob) ([]byte, error) { + shares, err := SplitBlobs(*blob) + if err != nil { + return nil, err + } + + // the commitment is the root of a merkle mountain range with max tree size + // determined by the number of roots required to create a share commitment + // over that blob. The size of the tree is only increased if the number of + // subtree roots surpasses a constant threshold. + subTreeWidth := share.SubTreeWidth(len(shares), appconsts.DefaultSubtreeRootThreshold) + treeSizes, err := merkleMountainRangeSizes(uint64(len(shares)), uint64(subTreeWidth)) + if err != nil { + return nil, err + } + leafSets := make([][][]byte, len(treeSizes)) + cursor := uint64(0) + for i, treeSize := range treeSizes { + leafSets[i] = share.ToBytes(shares[cursor : cursor+treeSize]) + cursor = cursor + treeSize + } + + // create the commitments by pushing each leaf set onto an nmt + subTreeRoots := make([][]byte, len(leafSets)) + for i, set := range leafSets { + // create the nmt todo(evan) use nmt wrapper + tree := nmt.New(sha256.New(), nmt.NamespaceIDSize(appconsts.NamespaceSize), nmt.IgnoreMaxNamespace(true)) + for _, leaf := range set { + namespace := blob.Namespace + // the namespace must be added again here even though it is already + // included in the leaf to ensure that the hash will match that of + // the nmt wrapper (pkg/wrapper). Each namespace is added to keep + // the namespace in the share, and therefore the parity data, while + // also allowing for the manual addition of the parity namespace to + // the parity data. + nsLeaf := make([]byte, 0) + nsLeaf = append(nsLeaf, namespace...) + nsLeaf = append(nsLeaf, leaf...) + + err = tree.Push(nsLeaf) + if err != nil { + return nil, err + } + } + // add the root + root, err := tree.Root() + if err != nil { + return nil, err + } + subTreeRoots[i] = root + } + return HashFromByteSlices(subTreeRoots), nil +} + +func CreateCommitments(blobs []*Blob) ([][]byte, error) { + commitments := make([][]byte, len(blobs)) + for i, blob := range blobs { + commitment, err := CreateCommitment(blob) + if err != nil { + return nil, err + } + commitments[i] = commitment + } + return commitments, nil +} + +// merkleMountainRangeSizes returns the sizes (number of leaf nodes) of the +// trees in a merkle mountain range constructed for a given totalSize and +// maxTreeSize. 
+// +// https://docs.grin.mw/wiki/chain-state/merkle-mountain-range/ +// https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md +// TODO: potentially rename function because this doesn't return heights +func merkleMountainRangeSizes(totalSize, maxTreeSize uint64) ([]uint64, error) { + var treeSizes []uint64 + + for totalSize != 0 { + switch { + case totalSize >= maxTreeSize: + treeSizes = append(treeSizes, maxTreeSize) + totalSize = totalSize - maxTreeSize + case totalSize < maxTreeSize: + treeSize, err := share.RoundDownPowerOfTwo(totalSize) + if err != nil { + return treeSizes, err + } + treeSizes = append(treeSizes, treeSize) + totalSize = totalSize - treeSize + } + } + + return treeSizes, nil +} diff --git a/types/blob/merkle.go b/types/blob/merkle.go new file mode 100644 index 0000000..bdc4f69 --- /dev/null +++ b/types/blob/merkle.go @@ -0,0 +1,188 @@ +package blob + +import ( + "crypto/sha256" + "hash" + "math/bits" +) + +const ( + Size = sha256.Size + BlockSize = sha256.BlockSize +) + +// New returns a new hash.Hash. +func New() hash.Hash { + return sha256.New() +} + +// Sum returns the SHA256 of the bz. +func Sum(bz []byte) []byte { + h := sha256.Sum256(bz) + return h[:] +} + +//------------------------------------------------------------- + +const ( + TruncatedSize = 20 +) + +type sha256trunc struct { + sha256 hash.Hash +} + +func (h sha256trunc) Write(p []byte) (n int, err error) { + return h.sha256.Write(p) +} +func (h sha256trunc) Sum(b []byte) []byte { + shasum := h.sha256.Sum(b) + return shasum[:TruncatedSize] +} + +func (h sha256trunc) Reset() { + h.sha256.Reset() +} + +func (h sha256trunc) Size() int { + return TruncatedSize +} + +func (h sha256trunc) BlockSize() int { + return h.sha256.BlockSize() +} + +// NewTruncated returns a new hash.Hash. +func NewTruncated() hash.Hash { + return sha256trunc{ + sha256: sha256.New(), + } +} + +// SumTruncated returns the first 20 bytes of SHA256 of the bz. +func SumTruncated(bz []byte) []byte { + hash := sha256.Sum256(bz) + return hash[:TruncatedSize] +} + +// TODO: make these have a large predefined capacity +var ( + leafPrefix = []byte{0} + innerPrefix = []byte{1} +) + +// returns tmhash() +func emptyHash() []byte { + return Sum([]byte{}) +} + +// returns tmhash(0x00 || leaf) +func leafHash(leaf []byte) []byte { + return Sum(append(leafPrefix, leaf...)) +} + +// returns tmhash(0x01 || left || right) +func innerHash(left []byte, right []byte) []byte { + return Sum(append(innerPrefix, append(left, right...)...)) +} + +// HashFromByteSlices computes a Merkle tree where the leaves are the byte slice, +// in the provided order. It follows RFC-6962. +func HashFromByteSlices(items [][]byte) []byte { + switch len(items) { + case 0: + return emptyHash() + case 1: + return leafHash(items[0]) + default: + k := getSplitPoint(int64(len(items))) + left := HashFromByteSlices(items[:k]) + right := HashFromByteSlices(items[k:]) + return innerHash(left, right) + } +} + +// HashFromByteSliceIterative is an iterative alternative to +// HashFromByteSlice motivated by potential performance improvements. +// (#2611) had suggested that an iterative version of +// HashFromByteSlice would be faster, presumably because +// we can envision some overhead accumulating from stack +// frames and function calls. Additionally, a recursive algorithm risks +// hitting the stack limit and causing a stack overflow should the tree +// be too large. 
+// +// Provided here is an iterative alternative, a test to assert +// correctness and a benchmark. On the performance side, there appears to +// be no overall difference: +// +// BenchmarkHashAlternatives/recursive-4 20000 77677 ns/op +// BenchmarkHashAlternatives/iterative-4 20000 76802 ns/op +// +// On the surface it might seem that the additional overhead is due to +// the different allocation patterns of the implementations. The recursive +// version uses a single [][]byte slices which it then re-slices at each level of the tree. +// The iterative version reproduces [][]byte once within the function and +// then rewrites sub-slices of that array at each level of the tree. +// +// Experimenting by modifying the code to simply calculate the +// hash and not store the result show little to no difference in performance. +// +// These preliminary results suggest: +// +// 1. The performance of the HashFromByteSlice is pretty good +// 2. Go has low overhead for recursive functions +// 3. The performance of the HashFromByteSlice routine is dominated +// by the actual hashing of data +// +// Although this work is in no way exhaustive, point #3 suggests that +// optimization of this routine would need to take an alternative +// approach to make significant improvements on the current performance. +// +// Finally, considering that the recursive implementation is easier to +// read, it might not be worthwhile to switch to a less intuitive +// implementation for so little benefit. +func HashFromByteSlicesIterative(input [][]byte) []byte { + items := make([][]byte, len(input)) + + for i, leaf := range input { + items[i] = leafHash(leaf) + } + + size := len(items) + for { + switch size { + case 0: + return emptyHash() + case 1: + return items[0] + default: + rp := 0 // read position + wp := 0 // write position + for rp < size { + if rp+1 < size { + items[wp] = innerHash(items[rp], items[rp+1]) + rp += 2 + } else { + items[wp] = items[rp] + rp++ + } + wp++ + } + size = wp + } + } +} + +// getSplitPoint returns the largest power of 2 less than length +func getSplitPoint(length int64) int64 { + if length < 1 { + panic("Trying to split a tree with size < 1") + } + uLength := uint(length) + bitlen := bits.Len(uLength) + k := int64(1 << uint(bitlen-1)) + if k == length { + k >>= 1 + } + return k +} diff --git a/types/blob/share_splitting.go b/types/blob/share_splitting.go new file mode 100644 index 0000000..d1eb7a1 --- /dev/null +++ b/types/blob/share_splitting.go @@ -0,0 +1,14 @@ +package blob + +import "github.com/rollkit/celestia-openrpc/types/share" + +// SplitBlobs splits the provided blobs into shares. +func SplitBlobs(blobs ...Blob) ([]share.Share, error) { + writer := share.NewSparseShareSplitter() + for _, blob := range blobs { + if err := writer.Write(blob.NamespaceVersion, blob.Namespace, blob.Data); err != nil { + return nil, err + } + } + return writer.Export(), nil +} diff --git a/types/core/core.go b/types/core/core.go index 98e624d..8464a51 100644 --- a/types/core/core.go +++ b/types/core/core.go @@ -130,18 +130,27 @@ type DataAvailabilityHeader struct { hash []byte } -// NewDataAvailabilityHeader generates a DataAvailability header using the provided square size and shares -func NewDataAvailabilityHeader(eds *rsmt2d.ExtendedDataSquare) DataAvailabilityHeader { - // generate the row and col roots using the EDS +// NewDataAvailabilityHeader generates a DataAvailability header using the +// provided extended data square. 
+func NewDataAvailabilityHeader(eds *rsmt2d.ExtendedDataSquare) (DataAvailabilityHeader, error) { + rowRoots, err := eds.RowRoots() + if err != nil { + return DataAvailabilityHeader{}, err + } + colRoots, err := eds.ColRoots() + if err != nil { + return DataAvailabilityHeader{}, err + } + dah := DataAvailabilityHeader{ - RowRoots: eds.RowRoots(), - ColumnRoots: eds.ColRoots(), + RowRoots: rowRoots, + ColumnRoots: colRoots, } - // generate the hash of the data using the new roots + // Generate the hash of the data using the new roots dah.Hash() - return dah + return dah, nil } // String returns hex representation of merkle hash of the DAHeader. diff --git a/types/share/blob_share_commitment_rules.go b/types/share/blob_share_commitment_rules.go new file mode 100644 index 0000000..069982b --- /dev/null +++ b/types/share/blob_share_commitment_rules.go @@ -0,0 +1,44 @@ +package share + +import ( + math "math" + + "golang.org/x/exp/constraints" +) + +// BlobMinSquareSize returns the minimum square size that can contain shareCount +// number of shares. +func BlobMinSquareSize(shareCount int) int { + return RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(shareCount))))) +} + +// SubTreeWidth determines the maximum number of leaves per subtree in the share +// commitment over a given blob. The input should be the total number of shares +// used by that blob. The reasoning behind this algorithm is discussed in depth +// in ADR013 +// (celestia-app/docs/architecture/adr-013-non-interative-default-rules-for-zero-padding). +func SubTreeWidth(shareCount, subtreeRootThreshold int) int { + // per ADR013, we use a predetermined threshold to determine width of sub + // trees used to create share commitments + s := (shareCount / subtreeRootThreshold) + + // round up if the width is not an exact multiple of the threshold + if shareCount%subtreeRootThreshold != 0 { + s++ + } + + // use a power of two equal to or larger than the multiple of the subtree + // root threshold + s = RoundUpPowerOfTwo(s) + + // use the minimum of the subtree width and the min square size, this + // gurarantees that a valid value is returned + return min(s, BlobMinSquareSize(shareCount)) +} + +func min[T constraints.Integer](i, j T) T { + if i < j { + return i + } + return j +} diff --git a/types/share/info_byte.go b/types/share/info_byte.go new file mode 100644 index 0000000..d911b03 --- /dev/null +++ b/types/share/info_byte.go @@ -0,0 +1,43 @@ +package share + +import ( + "fmt" + + "github.com/rollkit/celestia-openrpc/types/appconsts" +) + +// InfoByte is a byte with the following structure: the first 7 bits are +// reserved for version information in big endian form (initially `0000000`). +// The last bit is a "sequence start indicator", that is `1` if this is the +// first share of a sequence and `0` if this is a continuation share. +type InfoByte byte + +func NewInfoByte(version uint8, isSequenceStart bool) (InfoByte, error) { + if version > appconsts.MaxShareVersion { + return 0, fmt.Errorf("version %d must be less than or equal to %d", version, appconsts.MaxShareVersion) + } + + prefix := version << 1 + if isSequenceStart { + return InfoByte(prefix + 1), nil + } + return InfoByte(prefix), nil +} + +// Version returns the version encoded in this InfoByte. Version is +// expected to be between 0 and appconsts.MaxShareVersion (inclusive). +func (i InfoByte) Version() uint8 { + version := uint8(i) >> 1 + return version +} + +// IsSequenceStart returns whether this share is the start of a sequence. 
+func (i InfoByte) IsSequenceStart() bool { + return uint(i)%2 == 1 +} + +func ParseInfoByte(i byte) (InfoByte, error) { + isSequenceStart := i%2 == 1 + version := uint8(i) >> 1 + return NewInfoByte(version, isSequenceStart) +} diff --git a/types/share/padding.go b/types/share/padding.go new file mode 100644 index 0000000..5b4b202 --- /dev/null +++ b/types/share/padding.go @@ -0,0 +1,48 @@ +package share + +import ( + "bytes" + "errors" + + "github.com/rollkit/celestia-openrpc/types/appconsts" + "github.com/rollkit/celestia-openrpc/types/namespace" +) + +// NamespacePaddingShare returns a share that acts as padding. Namespace padding +// shares follow a blob so that the next blob may start at an index that +// conforms to blob share commitment rules. The ns parameter provided should +// be the namespace of the blob that precedes this padding in the data square. +func NamespacePaddingShare(ns namespace.Namespace) (Share, error) { + b, err := NewBuilder(ns, appconsts.ShareVersionZero, true).Init() + if err != nil { + return Share{}, err + } + if err := b.WriteSequenceLen(0); err != nil { + return Share{}, err + } + padding := bytes.Repeat([]byte{0}, appconsts.FirstSparseShareContentSize) + b.AddData(padding) + + paddingShare, err := b.Build() + if err != nil { + return Share{}, err + } + + return *paddingShare, nil +} + +// NamespacePaddingShares returns n namespace padding shares. +func NamespacePaddingShares(ns namespace.Namespace, n int) ([]Share, error) { + var err error + if n < 0 { + return nil, errors.New("n must be positive") + } + shares := make([]Share, n) + for i := 0; i < n; i++ { + shares[i], err = NamespacePaddingShare(ns) + if err != nil { + return shares, err + } + } + return shares, nil +} diff --git a/types/share/powers_of_two.go b/types/share/powers_of_two.go new file mode 100644 index 0000000..a1f9529 --- /dev/null +++ b/types/share/powers_of_two.go @@ -0,0 +1,28 @@ +package share + +import ( + "fmt" + + "golang.org/x/exp/constraints" +) + +// RoundDownPowerOfTwo returns the next power of two less than or equal to input. +func RoundDownPowerOfTwo[I constraints.Integer](input I) (I, error) { + if input <= 0 { + return 0, fmt.Errorf("input %v must be positive", input) + } + roundedUp := RoundUpPowerOfTwo(input) + if roundedUp == input { + return roundedUp, nil + } + return roundedUp / 2, nil +} + +// RoundUpPowerOfTwo returns the next power of two greater than or equal to input. +func RoundUpPowerOfTwo[I constraints.Integer](input I) I { + var result I = 1 + for result < input { + result = result << 1 + } + return result +} diff --git a/types/share/reserved_bytes.go b/types/share/reserved_bytes.go new file mode 100644 index 0000000..dd1fbda --- /dev/null +++ b/types/share/reserved_bytes.go @@ -0,0 +1,33 @@ +package share + +import ( + "encoding/binary" + "fmt" + + "github.com/rollkit/celestia-openrpc/types/appconsts" +) + +// NewReservedBytes returns a byte slice of length +// appconsts.CompactShareReservedBytes that contains the byteIndex of the first +// unit that starts in a compact share. +func NewReservedBytes(byteIndex uint32) ([]byte, error) { + if byteIndex >= appconsts.ShareSize { + return []byte{}, fmt.Errorf("byte index %d must be less than share size %d", byteIndex, appconsts.ShareSize) + } + reservedBytes := make([]byte, appconsts.CompactShareReservedBytes) + binary.BigEndian.PutUint32(reservedBytes, byteIndex) + return reservedBytes, nil +} + +// ParseReservedBytes parses a byte slice of length +// appconsts.CompactShareReservedBytes into a byteIndex. 
+func ParseReservedBytes(reservedBytes []byte) (uint32, error) { + if len(reservedBytes) != appconsts.CompactShareReservedBytes { + return 0, fmt.Errorf("reserved bytes must be of length %d", appconsts.CompactShareReservedBytes) + } + byteIndex := binary.BigEndian.Uint32(reservedBytes) + if appconsts.ShareSize <= byteIndex { + return 0, fmt.Errorf("byteIndex must be less than share size %d", appconsts.ShareSize) + } + return byteIndex, nil +} diff --git a/types/share/share.go b/types/share/share.go index d4574ad..ab00c80 100644 --- a/types/share/share.go +++ b/types/share/share.go @@ -1,12 +1,15 @@ package share import ( + "bytes" + "encoding/binary" "fmt" "github.com/celestiaorg/nmt" "github.com/rollkit/celestia-openrpc/types/appconsts" "github.com/rollkit/celestia-openrpc/types/core" + "github.com/rollkit/celestia-openrpc/types/namespace" ) // Root represents root commitment to multiple Shares. @@ -36,16 +39,250 @@ const ( // NOTE: Alias for the byte is chosen to keep maximal compatibility, especially with rsmt2d. // Ideally, we should define reusable type elsewhere and make everyone(Core, rsmt2d, ipld) to rely // on it. -type Share = []byte +// Share contains the raw share data (including namespace ID). +type Share struct { + data []byte +} + +func (s *Share) Namespace() (namespace.Namespace, error) { + if len(s.data) < appconsts.NamespaceSize { + panic(fmt.Sprintf("share %s is too short to contain a namespace", s)) + } + return namespace.From(s.data[:appconsts.NamespaceSize]) +} + +func (s *Share) InfoByte() (InfoByte, error) { + if len(s.data) < namespace.NamespaceSize+appconsts.ShareInfoBytes { + return 0, fmt.Errorf("share %s is too short to contain an info byte", s) + } + // the info byte is the first byte after the namespace + unparsed := s.data[namespace.NamespaceSize] + return ParseInfoByte(unparsed) +} + +func NewShare(data []byte) (*Share, error) { + if err := validateSize(data); err != nil { + return nil, err + } + return &Share{data}, nil +} + +func (s *Share) Validate() error { + return validateSize(s.data) +} + +func validateSize(data []byte) error { + if len(data) != appconsts.ShareSize { + return fmt.Errorf("share data must be %d bytes, got %d", appconsts.ShareSize, len(data)) + } + return nil +} + +func (s *Share) Len() int { + return len(s.data) +} + +func (s *Share) Version() (uint8, error) { + infoByte, err := s.InfoByte() + if err != nil { + return 0, err + } + return infoByte.Version(), nil +} + +func (s *Share) DoesSupportVersions(supportedShareVersions []uint8) error { + ver, err := s.Version() + if err != nil { + return err + } + if !bytes.Contains(supportedShareVersions, []byte{ver}) { + return fmt.Errorf("unsupported share version %v is not present in the list of supported share versions %v", ver, supportedShareVersions) + } + return nil +} + +// IsSequenceStart returns true if this is the first share in a sequence. +func (s *Share) IsSequenceStart() (bool, error) { + infoByte, err := s.InfoByte() + if err != nil { + return false, err + } + return infoByte.IsSequenceStart(), nil +} + +// IsCompactShare returns true if this is a compact share. +func (s Share) IsCompactShare() (bool, error) { + ns, err := s.Namespace() + if err != nil { + return false, err + } + isCompact := ns.IsTx() || ns.IsPayForBlob() + return isCompact, nil +} + +// SequenceLen returns the sequence length of this *share and optionally an +// error. It returns 0, nil if this is a continuation share (i.e. doesn't +// contain a sequence length). 
+func (s *Share) SequenceLen() (sequenceLen uint32, err error) { + isSequenceStart, err := s.IsSequenceStart() + if err != nil { + return 0, err + } + if !isSequenceStart { + return 0, nil + } + + start := appconsts.NamespaceSize + appconsts.ShareInfoBytes + end := start + appconsts.SequenceLenBytes + if len(s.data) < end { + return 0, fmt.Errorf("share %s with length %d is too short to contain a sequence length", + s, len(s.data)) + } + return binary.BigEndian.Uint32(s.data[start:end]), nil +} + +// IsPadding returns whether this *share is padding or not. +func (s *Share) IsPadding() (bool, error) { + isNamespacePadding, err := s.isNamespacePadding() + if err != nil { + return false, err + } + isTailPadding, err := s.isTailPadding() + if err != nil { + return false, err + } + isReservedPadding, err := s.isReservedPadding() + if err != nil { + return false, err + } + return isNamespacePadding || isTailPadding || isReservedPadding, nil +} + +func (s *Share) isNamespacePadding() (bool, error) { + isSequenceStart, err := s.IsSequenceStart() + if err != nil { + return false, err + } + sequenceLen, err := s.SequenceLen() + if err != nil { + return false, err + } + + return isSequenceStart && sequenceLen == 0, nil +} + +func (s *Share) isTailPadding() (bool, error) { + ns, err := s.Namespace() + if err != nil { + return false, err + } + return ns.IsTailPadding(), nil +} + +func (s *Share) isReservedPadding() (bool, error) { + ns, err := s.Namespace() + if err != nil { + return false, err + } + return ns.IsReservedPadding(), nil +} + +func (s *Share) ToBytes() []byte { + return s.data +} + +// RawData returns the raw share data. The raw share data does not contain the +// namespace ID, info byte, sequence length, or reserved bytes. +func (s *Share) RawData() (rawData []byte, err error) { + if len(s.data) < s.rawDataStartIndex() { + return rawData, fmt.Errorf("share %s is too short to contain raw data", s) + } + + return s.data[s.rawDataStartIndex():], nil +} + +func (s *Share) rawDataStartIndex() int { + isStart, err := s.IsSequenceStart() + if err != nil { + panic(err) + } + isCompact, err := s.IsCompactShare() + if err != nil { + panic(err) + } -// GetNamespace slices Namespace out of the Share. -func GetNamespace(s Share) Namespace { - return s[:appconsts.NamespaceSize] + index := appconsts.NamespaceSize + appconsts.ShareInfoBytes + if isStart { + index += appconsts.SequenceLenBytes + } + if isCompact { + index += appconsts.CompactShareReservedBytes + } + return index } -// GetData slices out data of the Share. -func GetData(s Share) []byte { - return s[appconsts.NamespaceSize:] +// RawDataWithReserved returns the raw share data while taking reserved bytes into account. +func (s *Share) RawDataUsingReserved() (rawData []byte, err error) { + rawDataStartIndexUsingReserved, err := s.rawDataStartIndexUsingReserved() + if err != nil { + return nil, err + } + + // This means share is the last share and does not have any transaction beginning in it + if rawDataStartIndexUsingReserved == 0 { + return []byte{}, nil + } + if len(s.data) < rawDataStartIndexUsingReserved { + return rawData, fmt.Errorf("share %s is too short to contain raw data", s) + } + + return s.data[rawDataStartIndexUsingReserved:], nil +} + +// rawDataStartIndexUsingReserved returns the start index of raw data while accounting for +// reserved bytes, if it exists in the share. 
+func (s *Share) rawDataStartIndexUsingReserved() (int, error) { + isStart, err := s.IsSequenceStart() + if err != nil { + return 0, err + } + isCompact, err := s.IsCompactShare() + if err != nil { + return 0, err + } + + index := appconsts.NamespaceSize + appconsts.ShareInfoBytes + if isStart { + index += appconsts.SequenceLenBytes + } + + if isCompact { + reservedBytes, err := ParseReservedBytes(s.data[index : index+appconsts.CompactShareReservedBytes]) + if err != nil { + return 0, err + } + return int(reservedBytes), nil + } + return index, nil +} + +func ToBytes(shares []Share) (bytes [][]byte) { + bytes = make([][]byte, len(shares)) + for i, share := range shares { + bytes[i] = []byte(share.data) + } + return bytes +} + +func FromBytes(bytes [][]byte) (shares []Share, err error) { + for _, b := range bytes { + share, err := NewShare(b) + if err != nil { + return nil, err + } + shares = append(shares, *share) + } + return shares, nil } // DataHash is a representation of the Root hash. diff --git a/types/share/share_builder.go b/types/share/share_builder.go new file mode 100644 index 0000000..9373338 --- /dev/null +++ b/types/share/share_builder.go @@ -0,0 +1,227 @@ +package share + +import ( + "encoding/binary" + "errors" + + "github.com/rollkit/celestia-openrpc/types/appconsts" + "github.com/rollkit/celestia-openrpc/types/namespace" +) + +type Builder struct { + namespace namespace.Namespace + shareVersion uint8 + isFirstShare bool + isCompactShare bool + rawShareData []byte +} + +func NewEmptyBuilder() *Builder { + return &Builder{ + rawShareData: make([]byte, 0, appconsts.ShareSize), + } +} + +// Init() needs to be called right after this method +func NewBuilder(ns namespace.Namespace, shareVersion uint8, isFirstShare bool) *Builder { + return &Builder{ + namespace: ns, + shareVersion: shareVersion, + isFirstShare: isFirstShare, + isCompactShare: isCompactShare(ns), + } +} + +func (b *Builder) Init() (*Builder, error) { + if b.isCompactShare { + if err := b.prepareCompactShare(); err != nil { + return nil, err + } + } else { + if err := b.prepareSparseShare(); err != nil { + return nil, err + } + } + + return b, nil +} + +func (b *Builder) AvailableBytes() int { + return appconsts.ShareSize - len(b.rawShareData) +} + +func (b *Builder) ImportRawShare(rawBytes []byte) *Builder { + b.rawShareData = rawBytes + return b +} + +func (b *Builder) AddData(rawData []byte) (rawDataLeftOver []byte) { + // find the len left in the pending share + pendingLeft := appconsts.ShareSize - len(b.rawShareData) + + // if we can simply add the tx to the share without creating a new + // pending share, do so and return + if len(rawData) <= pendingLeft { + b.rawShareData = append(b.rawShareData, rawData...) + return nil + } + + // if we can only add a portion of the rawData to the pending share, + // then we add it and add the pending share to the finalized shares. + chunk := rawData[:pendingLeft] + b.rawShareData = append(b.rawShareData, chunk...) 
+ + // We need to finish this share and start a new one + // so we return the leftover to be written into a new share + return rawData[pendingLeft:] +} + +func (b *Builder) Build() (*Share, error) { + return NewShare(b.rawShareData) +} + +// IsEmptyShare returns true if no data has been written to the share +func (b *Builder) IsEmptyShare() bool { + expectedLen := appconsts.NamespaceSize + appconsts.ShareInfoBytes + if b.isCompactShare { + expectedLen += appconsts.CompactShareReservedBytes + } + if b.isFirstShare { + expectedLen += appconsts.SequenceLenBytes + } + return len(b.rawShareData) == expectedLen +} + +func (b *Builder) ZeroPadIfNecessary() (bytesOfPadding int) { + b.rawShareData, bytesOfPadding = zeroPadIfNecessary(b.rawShareData, appconsts.ShareSize) + return bytesOfPadding +} + +// isEmptyReservedBytes returns true if the reserved bytes are empty. +func (b *Builder) isEmptyReservedBytes() (bool, error) { + indexOfReservedBytes := b.indexOfReservedBytes() + reservedBytes, err := ParseReservedBytes(b.rawShareData[indexOfReservedBytes : indexOfReservedBytes+appconsts.CompactShareReservedBytes]) + if err != nil { + return false, err + } + return reservedBytes == 0, nil +} + +// indexOfReservedBytes returns the index of the reserved bytes in the share. +func (b *Builder) indexOfReservedBytes() int { + if b.isFirstShare { + // if the share is the first share, the reserved bytes follow the namespace, info byte, and sequence length + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.SequenceLenBytes + } + // if the share is not the first share, the reserved bytes follow the namespace and info byte + return appconsts.NamespaceSize + appconsts.ShareInfoBytes +} + +// indexOfInfoBytes returns the index of the InfoBytes. +func (b *Builder) indexOfInfoBytes() int { + // the info byte is immediately after the namespace + return appconsts.NamespaceSize +} + +// MaybeWriteReservedBytes will be a no-op if the reserved bytes +// have already been populated. If the reserved bytes are empty, it will write +// the location of the next unit of data to the reserved bytes. +func (b *Builder) MaybeWriteReservedBytes() error { + if !b.isCompactShare { + return errors.New("this is not a compact share") + } + + empty, err := b.isEmptyReservedBytes() + if err != nil { + return err + } + if !empty { + return nil + } + + byteIndexOfNextUnit := len(b.rawShareData) + reservedBytes, err := NewReservedBytes(uint32(byteIndexOfNextUnit)) + if err != nil { + return err + } + + indexOfReservedBytes := b.indexOfReservedBytes() + // overwrite the reserved bytes of the pending share + for i := 0; i < appconsts.CompactShareReservedBytes; i++ { + b.rawShareData[indexOfReservedBytes+i] = reservedBytes[i] + } + return nil +} + +// writeSequenceLen writes the sequence length to the first share. 
+func (b *Builder) WriteSequenceLen(sequenceLen uint32) error { + if b == nil { + return errors.New("the builder object is not initialized (is nil)") + } + if !b.isFirstShare { + return errors.New("not the first share") + } + sequenceLenBuf := make([]byte, appconsts.SequenceLenBytes) + binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen) + + for i := 0; i < appconsts.SequenceLenBytes; i++ { + b.rawShareData[appconsts.NamespaceSize+appconsts.ShareInfoBytes+i] = sequenceLenBuf[i] + } + + return nil +} + +// FlipSequenceStart flips the sequence start indicator of the share provided +func (b *Builder) FlipSequenceStart() { + infoByteIndex := b.indexOfInfoBytes() + + // the sequence start indicator is the last bit of the info byte so flip the + // last bit + b.rawShareData[infoByteIndex] = b.rawShareData[infoByteIndex] ^ 0x01 +} + +func (b *Builder) prepareCompactShare() error { + shareData := make([]byte, 0, appconsts.ShareSize) + infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare) + if err != nil { + return err + } + placeholderSequenceLen := make([]byte, appconsts.SequenceLenBytes) + placeholderReservedBytes := make([]byte, appconsts.CompactShareReservedBytes) + + shareData = append(shareData, b.namespace.Bytes()...) + shareData = append(shareData, byte(infoByte)) + + if b.isFirstShare { + shareData = append(shareData, placeholderSequenceLen...) + } + + shareData = append(shareData, placeholderReservedBytes...) + + b.rawShareData = shareData + + return nil +} + +func (b *Builder) prepareSparseShare() error { + shareData := make([]byte, 0, appconsts.ShareSize) + infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare) + if err != nil { + return err + } + placeholderSequenceLen := make([]byte, appconsts.SequenceLenBytes) + + shareData = append(shareData, b.namespace.Bytes()...) + shareData = append(shareData, byte(infoByte)) + + if b.isFirstShare { + shareData = append(shareData, placeholderSequenceLen...) + } + + b.rawShareData = shareData + return nil +} + +func isCompactShare(ns namespace.Namespace) bool { + return ns.IsTx() || ns.IsPayForBlob() +} diff --git a/types/share/split_sparse_shares.go b/types/share/split_sparse_shares.go new file mode 100644 index 0000000..2006446 --- /dev/null +++ b/types/share/split_sparse_shares.go @@ -0,0 +1,106 @@ +package share + +import ( + "errors" + "fmt" + + "golang.org/x/exp/slices" + + "github.com/rollkit/celestia-openrpc/types/appconsts" + "github.com/rollkit/celestia-openrpc/types/namespace" +) + +// SparseShareSplitter lazily splits blobs into shares that will eventually be +// included in a data square. It also has methods to help progressively count +// how many shares the blobs written take up. +type SparseShareSplitter struct { + shares []Share +} + +func NewSparseShareSplitter() *SparseShareSplitter { + return &SparseShareSplitter{} +} + +// Write writes the provided blob to this sparse share splitter. It returns an +// error or nil if no error is encountered. 
+func (sss *SparseShareSplitter) Write(shareVersion uint32, ns, data []byte) error { + if !slices.Contains(appconsts.SupportedShareVersions, uint8(shareVersion)) { + return fmt.Errorf("unsupported share version: %d", shareVersion) + } + + rawData := data + blobNamespace, err := namespace.From(ns) + if err != nil { + return err + } + + // First share + b, err := NewBuilder(blobNamespace, uint8(shareVersion), true).Init() + if err != nil { + return err + } + if err := b.WriteSequenceLen(uint32(len(rawData))); err != nil { + return err + } + + for rawData != nil { + + rawDataLeftOver := b.AddData(rawData) + if rawDataLeftOver == nil { + // Just call it on the latest share + b.ZeroPadIfNecessary() + } + + share, err := b.Build() + if err != nil { + return err + } + sss.shares = append(sss.shares, *share) + + b, err = NewBuilder(blobNamespace, uint8(shareVersion), false).Init() + if err != nil { + return err + } + rawData = rawDataLeftOver + } + + return nil +} + +// WriteNamespacePaddingShares adds padding shares with the namespace of the +// last written This is useful to follow the non-interactive default +// rules. This function assumes that at least one share has already been +// written. +func (sss *SparseShareSplitter) WriteNamespacePaddingShares(count int) error { + if count < 0 { + return errors.New("cannot write negative namespaced shares") + } + if count == 0 { + return nil + } + if len(sss.shares) == 0 { + return errors.New("cannot write namespace padding shares on an empty SparseShareSplitter") + } + lastBlob := sss.shares[len(sss.shares)-1] + lastBlobNs, err := lastBlob.Namespace() + if err != nil { + return err + } + nsPaddingShares, err := NamespacePaddingShares(lastBlobNs, count) + if err != nil { + return err + } + sss.shares = append(sss.shares, nsPaddingShares...) + + return nil +} + +// Export finalizes and returns the underlying shares. +func (sss *SparseShareSplitter) Export() []Share { + return sss.shares +} + +// Count returns the current number of shares that will be made if exporting. +func (sss *SparseShareSplitter) Count() int { + return len(sss.shares) +} diff --git a/types/share/utils.go b/types/share/utils.go new file mode 100644 index 0000000..0a4f958 --- /dev/null +++ b/types/share/utils.go @@ -0,0 +1,19 @@ +package share + +import "bytes" + +// zeroPadIfNecessary pads the share with trailing zero bytes if the provided +// share has fewer bytes than width. Returns the share unmodified if the +// len(share) is greater than or equal to width. +func zeroPadIfNecessary(share []byte, width int) (padded []byte, bytesOfPadding int) { + oldLen := len(share) + if oldLen >= width { + return share, 0 + } + + missingBytes := width - oldLen + padByte := []byte{0} + padding := bytes.Repeat(padByte, missingBytes) + share = append(share, padding...) + return share, missingBytes +}
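For orientation, here is a short sketch (not part of the patch) that strings together the new calls exercised by the updated TestRoundTrip: build a v0 blob, derive its share commitment locally with blob.CreateCommitment, submit it with Blob.Submit, then fetch it back with Blob.Get using the returned height and the commitment. It assumes it sits next to client_test.go, that the client value exposes a Blob module handle exactly as the test uses it (the *Client type name is an assumption), and that the caller supplies the namespace and data; the package clause and imports (context, types/blob, types/share) are omitted and would match client_test.go.

// roundTripExample mirrors the updated TestRoundTrip flow. The *Client type
// and its Blob handle are assumptions based on how client_test.go uses them.
func roundTripExample(ctx context.Context, c *Client, ns share.Namespace, data []byte) ([]byte, error) {
	b, err := blob.NewBlobV0(ns, data)
	if err != nil {
		return nil, err
	}
	// the commitment is computed client-side and used later to look the blob up
	com, err := blob.CreateCommitment(b)
	if err != nil {
		return nil, err
	}
	// write the blob to DA; Submit returns the inclusion height
	height, err := c.Blob.Submit(ctx, []*blob.Blob{b})
	if err != nil {
		return nil, err
	}
	// retrieve the blob back from DA by height, namespace, and commitment
	got, err := c.Blob.Get(ctx, height, ns, com)
	if err != nil {
		return nil, err
	}
	return got.Data, nil
}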